localrepo: remove unused repo.branchtags()/_branchtip() methods
Brodie Rao
r20195:4274eda1 default
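
The two methods removed below had no remaining callers; the branchmap API
already exposes the same information. A minimal sketch of the replacement
call, assuming `repo` is an open localrepository:

    bm = repo.branchmap()            # branchcache: {branch: [branchheads]}
    tip = bm.branchtip('default')    # tipmost head, preferring open heads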
@@ -1,2467 +1,2449 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock as lockmod
12 12 import transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 import branchmap, pathutil
20 20 propertycache = util.propertycache
21 21 filecache = scmutil.filecache
22 22
23 23 class repofilecache(filecache):
24 24 """All filecache usage on repo are done for logic that should be unfiltered
25 25 """
26 26
27 27 def __get__(self, repo, type=None):
28 28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 29 def __set__(self, repo, value):
30 30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 31 def __delete__(self, repo):
32 32 return super(repofilecache, self).__delete__(repo.unfiltered())
33 33
34 34 class storecache(repofilecache):
35 35 """filecache for files in the store"""
36 36 def join(self, obj, fname):
37 37 return obj.sjoin(fname)
38 38
39 39 class unfilteredpropertycache(propertycache):
40 40 """propertycache that apply to unfiltered repo only"""
41 41
42 42 def __get__(self, repo, type=None):
43 43 unfi = repo.unfiltered()
44 44 if unfi is repo:
45 45 return super(unfilteredpropertycache, self).__get__(unfi)
46 46 return getattr(unfi, self.name)
47 47
48 48 class filteredpropertycache(propertycache):
49 49 """propertycache that must take filtering in account"""
50 50
51 51 def cachevalue(self, obj, value):
52 52 object.__setattr__(obj, self.name, value)
53 53
54 54
55 55 def hasunfilteredcache(repo, name):
56 56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 57 return name in vars(repo.unfiltered())
58 58
59 59 def unfilteredmethod(orig):
60 60 """decorate method that always need to be run on unfiltered version"""
61 61 def wrapper(repo, *args, **kwargs):
62 62 return orig(repo.unfiltered(), *args, **kwargs)
63 63 return wrapper
64 64
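# A minimal sketch of how the decorators above are used; `examplerepo` and
# `countall` are hypothetical names, not part of this module:
#
#     class examplerepo(localrepository):
#         @unfilteredmethod
#         def countall(self):
#             return len(self)  # always counts unfiltered revisions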
65 65 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
66 66 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
67 67
68 68 class localpeer(peer.peerrepository):
69 69 '''peer for a local repo; reflects only the most recent API'''
70 70
71 71 def __init__(self, repo, caps=MODERNCAPS):
72 72 peer.peerrepository.__init__(self)
73 73 self._repo = repo.filtered('served')
74 74 self.ui = repo.ui
75 75 self._caps = repo._restrictcapabilities(caps)
76 76 self.requirements = repo.requirements
77 77 self.supportedformats = repo.supportedformats
78 78
79 79 def close(self):
80 80 self._repo.close()
81 81
82 82 def _capabilities(self):
83 83 return self._caps
84 84
85 85 def local(self):
86 86 return self._repo
87 87
88 88 def canpush(self):
89 89 return True
90 90
91 91 def url(self):
92 92 return self._repo.url()
93 93
94 94 def lookup(self, key):
95 95 return self._repo.lookup(key)
96 96
97 97 def branchmap(self):
98 98 return self._repo.branchmap()
99 99
100 100 def heads(self):
101 101 return self._repo.heads()
102 102
103 103 def known(self, nodes):
104 104 return self._repo.known(nodes)
105 105
106 106 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
107 107 return self._repo.getbundle(source, heads=heads, common=common,
108 108 bundlecaps=bundlecaps)
109 109
110 110 # TODO We might want to move the next two calls into legacypeer and add
111 111 # unbundle instead.
112 112
113 113 def lock(self):
114 114 return self._repo.lock()
115 115
116 116 def addchangegroup(self, cg, source, url):
117 117 return self._repo.addchangegroup(cg, source, url)
118 118
119 119 def pushkey(self, namespace, key, old, new):
120 120 return self._repo.pushkey(namespace, key, old, new)
121 121
122 122 def listkeys(self, namespace):
123 123 return self._repo.listkeys(namespace)
124 124
125 125 def debugwireargs(self, one, two, three=None, four=None, five=None):
126 126 '''used to test argument passing over the wire'''
127 127 return "%s %s %s %s %s" % (one, two, three, four, five)
128 128
129 129 class locallegacypeer(localpeer):
130 130 '''peer extension which implements legacy methods too; used for tests with
131 131 restricted capabilities'''
132 132
133 133 def __init__(self, repo):
134 134 localpeer.__init__(self, repo, caps=LEGACYCAPS)
135 135
136 136 def branches(self, nodes):
137 137 return self._repo.branches(nodes)
138 138
139 139 def between(self, pairs):
140 140 return self._repo.between(pairs)
141 141
142 142 def changegroup(self, basenodes, source):
143 143 return self._repo.changegroup(basenodes, source)
144 144
145 145 def changegroupsubset(self, bases, heads, source):
146 146 return self._repo.changegroupsubset(bases, heads, source)
147 147
148 148 class localrepository(object):
149 149
150 150 supportedformats = set(('revlogv1', 'generaldelta'))
151 151 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
152 152 'dotencode'))
153 153 openerreqs = set(('revlogv1', 'generaldelta'))
154 154 requirements = ['revlogv1']
155 155 filtername = None
156 156
157 157 # a list of (ui, featureset) functions.
158 158 # only functions defined in the module of an enabled extension are invoked
159 159 featuresetupfuncs = set()
160 160
161 161 def _baserequirements(self, create):
162 162 return self.requirements[:]
163 163
164 164 def __init__(self, baseui, path=None, create=False):
165 165 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
166 166 self.wopener = self.wvfs
167 167 self.root = self.wvfs.base
168 168 self.path = self.wvfs.join(".hg")
169 169 self.origroot = path
170 170 self.auditor = pathutil.pathauditor(self.root, self._checknested)
171 171 self.vfs = scmutil.vfs(self.path)
172 172 self.opener = self.vfs
173 173 self.baseui = baseui
174 174 self.ui = baseui.copy()
175 175 self.ui.copy = baseui.copy # prevent copying repo configuration
176 176 # A list of callbacks to shape the phase if no data were found.
177 177 # Callbacks are in the form: func(repo, roots) --> processed root.
178 178 # This list is to be filled by extensions during repo setup.
179 179 self._phasedefaults = []
180 180 try:
181 181 self.ui.readconfig(self.join("hgrc"), self.root)
182 182 extensions.loadall(self.ui)
183 183 except IOError:
184 184 pass
185 185
186 186 if self.featuresetupfuncs:
187 187 self.supported = set(self._basesupported) # use private copy
188 188 extmods = set(m.__name__ for n, m
189 189 in extensions.extensions(self.ui))
190 190 for setupfunc in self.featuresetupfuncs:
191 191 if setupfunc.__module__ in extmods:
192 192 setupfunc(self.ui, self.supported)
193 193 else:
194 194 self.supported = self._basesupported
195 195
196 196 if not self.vfs.isdir():
197 197 if create:
198 198 if not self.wvfs.exists():
199 199 self.wvfs.makedirs()
200 200 self.vfs.makedir(notindexed=True)
201 201 requirements = self._baserequirements(create)
202 202 if self.ui.configbool('format', 'usestore', True):
203 203 self.vfs.mkdir("store")
204 204 requirements.append("store")
205 205 if self.ui.configbool('format', 'usefncache', True):
206 206 requirements.append("fncache")
207 207 if self.ui.configbool('format', 'dotencode', True):
208 208 requirements.append('dotencode')
209 209 # create an invalid changelog
210 210 self.vfs.append(
211 211 "00changelog.i",
212 212 '\0\0\0\2' # represents revlogv2
213 213 ' dummy changelog to prevent using the old repo layout'
214 214 )
215 215 if self.ui.configbool('format', 'generaldelta', False):
216 216 requirements.append("generaldelta")
217 217 requirements = set(requirements)
218 218 else:
219 219 raise error.RepoError(_("repository %s not found") % path)
220 220 elif create:
221 221 raise error.RepoError(_("repository %s already exists") % path)
222 222 else:
223 223 try:
224 224 requirements = scmutil.readrequires(self.vfs, self.supported)
225 225 except IOError, inst:
226 226 if inst.errno != errno.ENOENT:
227 227 raise
228 228 requirements = set()
229 229
230 230 self.sharedpath = self.path
231 231 try:
232 232 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
233 233 realpath=True)
234 234 s = vfs.base
235 235 if not vfs.exists():
236 236 raise error.RepoError(
237 237 _('.hg/sharedpath points to nonexistent directory %s') % s)
238 238 self.sharedpath = s
239 239 except IOError, inst:
240 240 if inst.errno != errno.ENOENT:
241 241 raise
242 242
243 243 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
244 244 self.spath = self.store.path
245 245 self.svfs = self.store.vfs
246 246 self.sopener = self.svfs
247 247 self.sjoin = self.store.join
248 248 self.vfs.createmode = self.store.createmode
249 249 self._applyrequirements(requirements)
250 250 if create:
251 251 self._writerequirements()
252 252
253 253
254 254 self._branchcaches = {}
255 255 self.filterpats = {}
256 256 self._datafilters = {}
257 257 self._transref = self._lockref = self._wlockref = None
258 258
259 259 # A cache for various files under .hg/ that tracks file changes
260 260 # (used by the filecache decorator)
261 261 #
262 262 # Maps a property name to its util.filecacheentry
263 263 self._filecache = {}
264 264
265 265 # hold sets of revisions to be filtered
266 266 # should be cleared when something might have changed the filter value:
267 267 # - new changesets,
268 268 # - phase change,
269 269 # - new obsolescence marker,
270 270 # - working directory parent change,
271 271 # - bookmark changes
272 272 self.filteredrevcache = {}
273 273
274 274 def close(self):
275 275 pass
276 276
277 277 def _restrictcapabilities(self, caps):
278 278 return caps
279 279
280 280 def _applyrequirements(self, requirements):
281 281 self.requirements = requirements
282 282 self.sopener.options = dict((r, 1) for r in requirements
283 283 if r in self.openerreqs)
284 284 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
285 285 if chunkcachesize is not None:
286 286 self.sopener.options['chunkcachesize'] = chunkcachesize
287 287
288 288 def _writerequirements(self):
289 289 reqfile = self.opener("requires", "w")
290 290 for r in sorted(self.requirements):
291 291 reqfile.write("%s\n" % r)
292 292 reqfile.close()
293 293
294 294 def _checknested(self, path):
295 295 """Determine if path is a legal nested repository."""
296 296 if not path.startswith(self.root):
297 297 return False
298 298 subpath = path[len(self.root) + 1:]
299 299 normsubpath = util.pconvert(subpath)
300 300
301 301 # XXX: Checking against the current working copy is wrong in
302 302 # the sense that it can reject things like
303 303 #
304 304 # $ hg cat -r 10 sub/x.txt
305 305 #
306 306 # if sub/ is no longer a subrepository in the working copy
307 307 # parent revision.
308 308 #
309 309 # However, it can of course also allow things that would have
310 310 # been rejected before, such as the above cat command if sub/
311 311 # is a subrepository now, but was a normal directory before.
312 312 # The old path auditor would have rejected by mistake since it
313 313 # panics when it sees sub/.hg/.
314 314 #
315 315 # All in all, checking against the working copy seems sensible
316 316 # since we want to prevent access to nested repositories on
317 317 # the filesystem *now*.
318 318 ctx = self[None]
319 319 parts = util.splitpath(subpath)
320 320 while parts:
321 321 prefix = '/'.join(parts)
322 322 if prefix in ctx.substate:
323 323 if prefix == normsubpath:
324 324 return True
325 325 else:
326 326 sub = ctx.sub(prefix)
327 327 return sub.checknested(subpath[len(prefix) + 1:])
328 328 else:
329 329 parts.pop()
330 330 return False
331 331
332 332 def peer(self):
333 333 return localpeer(self) # not cached to avoid reference cycle
334 334
335 335 def unfiltered(self):
336 336 """Return unfiltered version of the repository
337 337
338 338 Intended to be overwritten by filtered repo."""
339 339 return self
340 340
341 341 def filtered(self, name):
342 342 """Return a filtered version of a repository"""
343 343 # build a new class with the mixin and the current class
344 344 # (possibly subclass of the repo)
345 345 class proxycls(repoview.repoview, self.unfiltered().__class__):
346 346 pass
347 347 return proxycls(self, name)
348 348
349 349 @repofilecache('bookmarks')
350 350 def _bookmarks(self):
351 351 return bookmarks.bmstore(self)
352 352
353 353 @repofilecache('bookmarks.current')
354 354 def _bookmarkcurrent(self):
355 355 return bookmarks.readcurrent(self)
356 356
357 357 def bookmarkheads(self, bookmark):
358 358 name = bookmark.split('@', 1)[0]
359 359 heads = []
360 360 for mark, n in self._bookmarks.iteritems():
361 361 if mark.split('@', 1)[0] == name:
362 362 heads.append(n)
363 363 return heads
364 364
365 365 @storecache('phaseroots')
366 366 def _phasecache(self):
367 367 return phases.phasecache(self, self._phasedefaults)
368 368
369 369 @storecache('obsstore')
370 370 def obsstore(self):
371 371 store = obsolete.obsstore(self.sopener)
372 372 if store and not obsolete._enabled:
373 373 # message is rare enough to not be translated
374 374 msg = 'obsolete feature not enabled but %i markers found!\n'
375 375 self.ui.warn(msg % len(list(store)))
376 376 return store
377 377
378 378 @storecache('00changelog.i')
379 379 def changelog(self):
380 380 c = changelog.changelog(self.sopener)
381 381 if 'HG_PENDING' in os.environ:
382 382 p = os.environ['HG_PENDING']
383 383 if p.startswith(self.root):
384 384 c.readpending('00changelog.i.a')
385 385 return c
386 386
387 387 @storecache('00manifest.i')
388 388 def manifest(self):
389 389 return manifest.manifest(self.sopener)
390 390
391 391 @repofilecache('dirstate')
392 392 def dirstate(self):
393 393 warned = [0]
394 394 def validate(node):
395 395 try:
396 396 self.changelog.rev(node)
397 397 return node
398 398 except error.LookupError:
399 399 if not warned[0]:
400 400 warned[0] = True
401 401 self.ui.warn(_("warning: ignoring unknown"
402 402 " working parent %s!\n") % short(node))
403 403 return nullid
404 404
405 405 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
406 406
407 407 def __getitem__(self, changeid):
408 408 if changeid is None:
409 409 return context.workingctx(self)
410 410 return context.changectx(self, changeid)
411 411
412 412 def __contains__(self, changeid):
413 413 try:
414 414 return bool(self.lookup(changeid))
415 415 except error.RepoLookupError:
416 416 return False
417 417
418 418 def __nonzero__(self):
419 419 return True
420 420
421 421 def __len__(self):
422 422 return len(self.changelog)
423 423
424 424 def __iter__(self):
425 425 return iter(self.changelog)
426 426
427 427 def revs(self, expr, *args):
428 428 '''Return a list of revisions matching the given revset'''
429 429 expr = revset.formatspec(expr, *args)
430 430 m = revset.match(None, expr)
431 431 return [r for r in m(self, list(self))]
432 432
433 433 def set(self, expr, *args):
434 434 '''
435 435 Yield a context for each matching revision, after doing arg
436 436 replacement via revset.formatspec
437 437 '''
438 438 for r in self.revs(expr, *args):
439 439 yield self[r]
440 440
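# A minimal sketch of revs()/set() usage, assuming `repo` is an open
# localrepository; formatspec quotes each argument according to its %-type:
#
#     for ctx in repo.set('branch(%s) and not merge()', 'default'):
#         pass  # ctx is a changectx for one matching revision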
441 441 def url(self):
442 442 return 'file:' + self.root
443 443
444 444 def hook(self, name, throw=False, **args):
445 445 return hook.hook(self.ui, self, name, throw, **args)
446 446
447 447 @unfilteredmethod
448 448 def _tag(self, names, node, message, local, user, date, extra={}):
449 449 if isinstance(names, str):
450 450 names = (names,)
451 451
452 452 branches = self.branchmap()
453 453 for name in names:
454 454 self.hook('pretag', throw=True, node=hex(node), tag=name,
455 455 local=local)
456 456 if name in branches:
457 457 self.ui.warn(_("warning: tag %s conflicts with existing"
458 458 " branch name\n") % name)
459 459
460 460 def writetags(fp, names, munge, prevtags):
461 461 fp.seek(0, 2)
462 462 if prevtags and prevtags[-1] != '\n':
463 463 fp.write('\n')
464 464 for name in names:
465 465 m = munge and munge(name) or name
466 466 if (self._tagscache.tagtypes and
467 467 name in self._tagscache.tagtypes):
468 468 old = self.tags().get(name, nullid)
469 469 fp.write('%s %s\n' % (hex(old), m))
470 470 fp.write('%s %s\n' % (hex(node), m))
471 471 fp.close()
472 472
473 473 prevtags = ''
474 474 if local:
475 475 try:
476 476 fp = self.opener('localtags', 'r+')
477 477 except IOError:
478 478 fp = self.opener('localtags', 'a')
479 479 else:
480 480 prevtags = fp.read()
481 481
482 482 # local tags are stored in the current charset
483 483 writetags(fp, names, None, prevtags)
484 484 for name in names:
485 485 self.hook('tag', node=hex(node), tag=name, local=local)
486 486 return
487 487
488 488 try:
489 489 fp = self.wfile('.hgtags', 'rb+')
490 490 except IOError, e:
491 491 if e.errno != errno.ENOENT:
492 492 raise
493 493 fp = self.wfile('.hgtags', 'ab')
494 494 else:
495 495 prevtags = fp.read()
496 496
497 497 # committed tags are stored in UTF-8
498 498 writetags(fp, names, encoding.fromlocal, prevtags)
499 499
500 500 fp.close()
501 501
502 502 self.invalidatecaches()
503 503
504 504 if '.hgtags' not in self.dirstate:
505 505 self[None].add(['.hgtags'])
506 506
507 507 m = matchmod.exact(self.root, '', ['.hgtags'])
508 508 tagnode = self.commit(message, user, date, extra=extra, match=m)
509 509
510 510 for name in names:
511 511 self.hook('tag', node=hex(node), tag=name, local=local)
512 512
513 513 return tagnode
514 514
515 515 def tag(self, names, node, message, local, user, date):
516 516 '''tag a revision with one or more symbolic names.
517 517
518 518 names is a list of strings or, when adding a single tag, names may be a
519 519 string.
520 520
521 521 if local is True, the tags are stored in a per-repository file.
522 522 otherwise, they are stored in the .hgtags file, and a new
523 523 changeset is committed with the change.
524 524
525 525 keyword arguments:
526 526
527 527 local: whether to store tags in non-version-controlled file
528 528 (default False)
529 529
530 530 message: commit message to use if committing
531 531
532 532 user: name of user to use if committing
533 533
534 534 date: date tuple to use if committing'''
535 535
536 536 if not local:
537 537 for x in self.status()[:5]:
538 538 if '.hgtags' in x:
539 539 raise util.Abort(_('working copy of .hgtags is changed '
540 540 '(please commit .hgtags manually)'))
541 541
542 542 self.tags() # instantiate the cache
543 543 self._tag(names, node, message, local, user, date)
544 544
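# A minimal sketch of calling tag() (hypothetical tag name and message):
#
#     node = repo['tip'].node()
#     repo.tag('v1.0', node, 'Added tag v1.0', local=False, user=None,
#              date=None)  # commits a new .hgtags revision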
545 545 @filteredpropertycache
546 546 def _tagscache(self):
547 547 '''Returns a tagscache object that contains various tags-related
548 548 caches.'''
549 549
550 550 # This simplifies its cache management by having one decorated
551 551 # function (this one) and the rest simply fetch things from it.
552 552 class tagscache(object):
553 553 def __init__(self):
554 554 # These two define the set of tags for this repository. tags
555 555 # maps tag name to node; tagtypes maps tag name to 'global' or
556 556 # 'local'. (Global tags are defined by .hgtags across all
557 557 # heads, and local tags are defined in .hg/localtags.)
558 558 # They constitute the in-memory cache of tags.
559 559 self.tags = self.tagtypes = None
560 560
561 561 self.nodetagscache = self.tagslist = None
562 562
563 563 cache = tagscache()
564 564 cache.tags, cache.tagtypes = self._findtags()
565 565
566 566 return cache
567 567
568 568 def tags(self):
569 569 '''return a mapping of tag to node'''
570 570 t = {}
571 571 if self.changelog.filteredrevs:
572 572 tags, tt = self._findtags()
573 573 else:
574 574 tags = self._tagscache.tags
575 575 for k, v in tags.iteritems():
576 576 try:
577 577 # ignore tags to unknown nodes
578 578 self.changelog.rev(v)
579 579 t[k] = v
580 580 except (error.LookupError, ValueError):
581 581 pass
582 582 return t
583 583
584 584 def _findtags(self):
585 585 '''Do the hard work of finding tags. Return a pair of dicts
586 586 (tags, tagtypes) where tags maps tag name to node, and tagtypes
587 587 maps tag name to a string like \'global\' or \'local\'.
588 588 Subclasses or extensions are free to add their own tags, but
589 589 should be aware that the returned dicts will be retained for the
590 590 duration of the localrepo object.'''
591 591
592 592 # XXX what tagtype should subclasses/extensions use? Currently
593 593 # mq and bookmarks add tags, but do not set the tagtype at all.
594 594 # Should each extension invent its own tag type? Should there
595 595 # be one tagtype for all such "virtual" tags? Or is the status
596 596 # quo fine?
597 597
598 598 alltags = {} # map tag name to (node, hist)
599 599 tagtypes = {}
600 600
601 601 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
602 602 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
603 603
604 604 # Build the return dicts. Have to re-encode tag names because
605 605 # the tags module always uses UTF-8 (in order not to lose info
606 606 # writing to the cache), but the rest of Mercurial wants them in
607 607 # local encoding.
608 608 tags = {}
609 609 for (name, (node, hist)) in alltags.iteritems():
610 610 if node != nullid:
611 611 tags[encoding.tolocal(name)] = node
612 612 tags['tip'] = self.changelog.tip()
613 613 tagtypes = dict([(encoding.tolocal(name), value)
614 614 for (name, value) in tagtypes.iteritems()])
615 615 return (tags, tagtypes)
616 616
617 617 def tagtype(self, tagname):
618 618 '''
619 619 return the type of the given tag. result can be:
620 620
621 621 'local' : a local tag
622 622 'global' : a global tag
623 623 None : tag does not exist
624 624 '''
625 625
626 626 return self._tagscache.tagtypes.get(tagname)
627 627
628 628 def tagslist(self):
629 629 '''return a list of tags ordered by revision'''
630 630 if not self._tagscache.tagslist:
631 631 l = []
632 632 for t, n in self.tags().iteritems():
633 633 r = self.changelog.rev(n)
634 634 l.append((r, t, n))
635 635 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
636 636
637 637 return self._tagscache.tagslist
638 638
639 639 def nodetags(self, node):
640 640 '''return the tags associated with a node'''
641 641 if not self._tagscache.nodetagscache:
642 642 nodetagscache = {}
643 643 for t, n in self._tagscache.tags.iteritems():
644 644 nodetagscache.setdefault(n, []).append(t)
645 645 for tags in nodetagscache.itervalues():
646 646 tags.sort()
647 647 self._tagscache.nodetagscache = nodetagscache
648 648 return self._tagscache.nodetagscache.get(node, [])
649 649
650 650 def nodebookmarks(self, node):
651 651 marks = []
652 652 for bookmark, n in self._bookmarks.iteritems():
653 653 if n == node:
654 654 marks.append(bookmark)
655 655 return sorted(marks)
656 656
657 657 def branchmap(self):
658 658 '''returns a dictionary {branch: [branchheads]}'''
659 659 branchmap.updatecache(self)
660 660 return self._branchcaches[self.filtername]
661 661
662
663 def _branchtip(self, heads):
664 '''return the tipmost branch head in heads'''
665 tip = heads[-1]
666 for h in reversed(heads):
667 if not self[h].closesbranch():
668 tip = h
669 break
670 return tip
671
672 662 def branchtip(self, branch):
673 663 '''return the tip node for a given branch'''
674 664 try:
675 665 return self.branchmap().branchtip(branch)
676 666 except KeyError:
677 667 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
678 668
679 def branchtags(self):
680 '''return a dict where branch names map to the tipmost head of
681 the branch, open heads come before closed'''
682 bt = {}
683 for bn, heads in self.branchmap().iteritems():
684 bt[bn] = self._branchtip(heads)
685 return bt
686
687 669 def lookup(self, key):
688 670 return self[key].node()
689 671
690 672 def lookupbranch(self, key, remote=None):
691 673 repo = remote or self
692 674 if key in repo.branchmap():
693 675 return key
694 676
695 677 repo = (remote and remote.local()) and remote or self
696 678 return repo[key].branch()
697 679
698 680 def known(self, nodes):
699 681 nm = self.changelog.nodemap
700 682 pc = self._phasecache
701 683 result = []
702 684 for n in nodes:
703 685 r = nm.get(n)
704 686 resp = not (r is None or pc.phase(self, r) >= phases.secret)
705 687 result.append(resp)
706 688 return result
707 689
708 690 def local(self):
709 691 return self
710 692
711 693 def cancopy(self):
712 694 return self.local() # so statichttprepo's override of local() works
713 695
714 696 def join(self, f):
715 697 return os.path.join(self.path, f)
716 698
717 699 def wjoin(self, f):
718 700 return os.path.join(self.root, f)
719 701
720 702 def file(self, f):
721 703 if f[0] == '/':
722 704 f = f[1:]
723 705 return filelog.filelog(self.sopener, f)
724 706
725 707 def changectx(self, changeid):
726 708 return self[changeid]
727 709
728 710 def parents(self, changeid=None):
729 711 '''get list of changectxs for parents of changeid'''
730 712 return self[changeid].parents()
731 713
732 714 def setparents(self, p1, p2=nullid):
733 715 copies = self.dirstate.setparents(p1, p2)
734 716 pctx = self[p1]
735 717 if copies:
736 718 # Adjust copy records, the dirstate cannot do it, it
737 719 # requires access to parents manifests. Preserve them
738 720 # only for entries added to first parent.
739 721 for f in copies:
740 722 if f not in pctx and copies[f] in pctx:
741 723 self.dirstate.copy(copies[f], f)
742 724 if p2 == nullid:
743 725 for f, s in sorted(self.dirstate.copies().items()):
744 726 if f not in pctx and s not in pctx:
745 727 self.dirstate.copy(None, f)
746 728
747 729 def filectx(self, path, changeid=None, fileid=None):
748 730 """changeid can be a changeset revision, node, or tag.
749 731 fileid can be a file revision or node."""
750 732 return context.filectx(self, path, changeid, fileid)
751 733
752 734 def getcwd(self):
753 735 return self.dirstate.getcwd()
754 736
755 737 def pathto(self, f, cwd=None):
756 738 return self.dirstate.pathto(f, cwd)
757 739
758 740 def wfile(self, f, mode='r'):
759 741 return self.wopener(f, mode)
760 742
761 743 def _link(self, f):
762 744 return self.wvfs.islink(f)
763 745
764 746 def _loadfilter(self, filter):
765 747 if filter not in self.filterpats:
766 748 l = []
767 749 for pat, cmd in self.ui.configitems(filter):
768 750 if cmd == '!':
769 751 continue
770 752 mf = matchmod.match(self.root, '', [pat])
771 753 fn = None
772 754 params = cmd
773 755 for name, filterfn in self._datafilters.iteritems():
774 756 if cmd.startswith(name):
775 757 fn = filterfn
776 758 params = cmd[len(name):].lstrip()
777 759 break
778 760 if not fn:
779 761 fn = lambda s, c, **kwargs: util.filter(s, c)
780 762 # Wrap old filters not supporting keyword arguments
781 763 if not inspect.getargspec(fn)[2]:
782 764 oldfn = fn
783 765 fn = lambda s, c, **kwargs: oldfn(s, c)
784 766 l.append((mf, fn, params))
785 767 self.filterpats[filter] = l
786 768 return self.filterpats[filter]
787 769
788 770 def _filter(self, filterpats, filename, data):
789 771 for mf, fn, cmd in filterpats:
790 772 if mf(filename):
791 773 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
792 774 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
793 775 break
794 776
795 777 return data
796 778
797 779 @unfilteredpropertycache
798 780 def _encodefilterpats(self):
799 781 return self._loadfilter('encode')
800 782
801 783 @unfilteredpropertycache
802 784 def _decodefilterpats(self):
803 785 return self._loadfilter('decode')
804 786
805 787 def adddatafilter(self, name, filter):
806 788 self._datafilters[name] = filter
807 789
808 790 def wread(self, filename):
809 791 if self._link(filename):
810 792 data = self.wvfs.readlink(filename)
811 793 else:
812 794 data = self.wopener.read(filename)
813 795 return self._filter(self._encodefilterpats, filename, data)
814 796
815 797 def wwrite(self, filename, data, flags):
816 798 data = self._filter(self._decodefilterpats, filename, data)
817 799 if 'l' in flags:
818 800 self.wopener.symlink(data, filename)
819 801 else:
820 802 self.wopener.write(filename, data)
821 803 if 'x' in flags:
822 804 self.wvfs.setflags(filename, False, True)
823 805
824 806 def wwritedata(self, filename, data):
825 807 return self._filter(self._decodefilterpats, filename, data)
826 808
827 809 def transaction(self, desc, report=None):
828 810 tr = self._transref and self._transref() or None
829 811 if tr and tr.running():
830 812 return tr.nest()
831 813
832 814 # abort here if the journal already exists
833 815 if self.svfs.exists("journal"):
834 816 raise error.RepoError(
835 817 _("abandoned transaction found - run hg recover"))
836 818
837 819 self._writejournal(desc)
838 820 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
839 821 rp = report and report or self.ui.warn
840 822 tr = transaction.transaction(rp, self.sopener,
841 823 "journal",
842 824 aftertrans(renames),
843 825 self.store.createmode)
844 826 self._transref = weakref.ref(tr)
845 827 return tr
846 828
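# A minimal sketch of the transaction discipline this method expects (the
# same pattern commitctx() uses below); 'example' is a hypothetical desc:
#
#     tr = repo.transaction('example')
#     try:
#         pass          # write to the store through tr
#         tr.close()    # commit the transaction
#     finally:
#         tr.release()  # rolls back if close() was never reached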
847 829 def _journalfiles(self):
848 830 return ((self.svfs, 'journal'),
849 831 (self.vfs, 'journal.dirstate'),
850 832 (self.vfs, 'journal.branch'),
851 833 (self.vfs, 'journal.desc'),
852 834 (self.vfs, 'journal.bookmarks'),
853 835 (self.svfs, 'journal.phaseroots'))
854 836
855 837 def undofiles(self):
856 838 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
857 839
858 840 def _writejournal(self, desc):
859 841 self.opener.write("journal.dirstate",
860 842 self.opener.tryread("dirstate"))
861 843 self.opener.write("journal.branch",
862 844 encoding.fromlocal(self.dirstate.branch()))
863 845 self.opener.write("journal.desc",
864 846 "%d\n%s\n" % (len(self), desc))
865 847 self.opener.write("journal.bookmarks",
866 848 self.opener.tryread("bookmarks"))
867 849 self.sopener.write("journal.phaseroots",
868 850 self.sopener.tryread("phaseroots"))
869 851
870 852 def recover(self):
871 853 lock = self.lock()
872 854 try:
873 855 if self.svfs.exists("journal"):
874 856 self.ui.status(_("rolling back interrupted transaction\n"))
875 857 transaction.rollback(self.sopener, "journal",
876 858 self.ui.warn)
877 859 self.invalidate()
878 860 return True
879 861 else:
880 862 self.ui.warn(_("no interrupted transaction available\n"))
881 863 return False
882 864 finally:
883 865 lock.release()
884 866
885 867 def rollback(self, dryrun=False, force=False):
886 868 wlock = lock = None
887 869 try:
888 870 wlock = self.wlock()
889 871 lock = self.lock()
890 872 if self.svfs.exists("undo"):
891 873 return self._rollback(dryrun, force)
892 874 else:
893 875 self.ui.warn(_("no rollback information available\n"))
894 876 return 1
895 877 finally:
896 878 release(lock, wlock)
897 879
898 880 @unfilteredmethod # Until we get smarter cache management
899 881 def _rollback(self, dryrun, force):
900 882 ui = self.ui
901 883 try:
902 884 args = self.opener.read('undo.desc').splitlines()
903 885 (oldlen, desc, detail) = (int(args[0]), args[1], None)
904 886 if len(args) >= 3:
905 887 detail = args[2]
906 888 oldtip = oldlen - 1
907 889
908 890 if detail and ui.verbose:
909 891 msg = (_('repository tip rolled back to revision %s'
910 892 ' (undo %s: %s)\n')
911 893 % (oldtip, desc, detail))
912 894 else:
913 895 msg = (_('repository tip rolled back to revision %s'
914 896 ' (undo %s)\n')
915 897 % (oldtip, desc))
916 898 except IOError:
917 899 msg = _('rolling back unknown transaction\n')
918 900 desc = None
919 901
920 902 if not force and self['.'] != self['tip'] and desc == 'commit':
921 903 raise util.Abort(
922 904 _('rollback of last commit while not checked out '
923 905 'may lose data'), hint=_('use -f to force'))
924 906
925 907 ui.status(msg)
926 908 if dryrun:
927 909 return 0
928 910
929 911 parents = self.dirstate.parents()
930 912 self.destroying()
931 913 transaction.rollback(self.sopener, 'undo', ui.warn)
932 914 if self.vfs.exists('undo.bookmarks'):
933 915 self.vfs.rename('undo.bookmarks', 'bookmarks')
934 916 if self.svfs.exists('undo.phaseroots'):
935 917 self.svfs.rename('undo.phaseroots', 'phaseroots')
936 918 self.invalidate()
937 919
938 920 parentgone = (parents[0] not in self.changelog.nodemap or
939 921 parents[1] not in self.changelog.nodemap)
940 922 if parentgone:
941 923 self.vfs.rename('undo.dirstate', 'dirstate')
942 924 try:
943 925 branch = self.opener.read('undo.branch')
944 926 self.dirstate.setbranch(encoding.tolocal(branch))
945 927 except IOError:
946 928 ui.warn(_('named branch could not be reset: '
947 929 'current branch is still \'%s\'\n')
948 930 % self.dirstate.branch())
949 931
950 932 self.dirstate.invalidate()
951 933 parents = tuple([p.rev() for p in self.parents()])
952 934 if len(parents) > 1:
953 935 ui.status(_('working directory now based on '
954 936 'revisions %d and %d\n') % parents)
955 937 else:
956 938 ui.status(_('working directory now based on '
957 939 'revision %d\n') % parents)
958 940 # TODO: if we know which new heads may result from this rollback, pass
959 941 # them to destroy(), which will prevent the branchhead cache from being
960 942 # invalidated.
961 943 self.destroyed()
962 944 return 0
963 945
964 946 def invalidatecaches(self):
965 947
966 948 if '_tagscache' in vars(self):
967 949 # can't use delattr on proxy
968 950 del self.__dict__['_tagscache']
969 951
970 952 self.unfiltered()._branchcaches.clear()
971 953 self.invalidatevolatilesets()
972 954
973 955 def invalidatevolatilesets(self):
974 956 self.filteredrevcache.clear()
975 957 obsolete.clearobscaches(self)
976 958
977 959 def invalidatedirstate(self):
978 960 '''Invalidates the dirstate, causing the next call to dirstate
979 961 to check if it was modified since the last time it was read,
980 962 rereading it if it has.
981 963
982 964 This is different from dirstate.invalidate() in that it doesn't
983 965 always reread the dirstate. Use dirstate.invalidate() if you want to
984 966 explicitly read the dirstate again (i.e. restoring it to a previous
985 967 known good state).'''
986 968 if hasunfilteredcache(self, 'dirstate'):
987 969 for k in self.dirstate._filecache:
988 970 try:
989 971 delattr(self.dirstate, k)
990 972 except AttributeError:
991 973 pass
992 974 delattr(self.unfiltered(), 'dirstate')
993 975
994 976 def invalidate(self):
995 977 unfiltered = self.unfiltered() # all file caches are stored unfiltered
996 978 for k in self._filecache:
997 979 # dirstate is invalidated separately in invalidatedirstate()
998 980 if k == 'dirstate':
999 981 continue
1000 982
1001 983 try:
1002 984 delattr(unfiltered, k)
1003 985 except AttributeError:
1004 986 pass
1005 987 self.invalidatecaches()
1006 988
1007 989 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1008 990 try:
1009 991 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1010 992 except error.LockHeld, inst:
1011 993 if not wait:
1012 994 raise
1013 995 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1014 996 (desc, inst.locker))
1015 997 # default to 600 seconds timeout
1016 998 l = lockmod.lock(vfs, lockname,
1017 999 int(self.ui.config("ui", "timeout", "600")),
1018 1000 releasefn, desc=desc)
1019 1001 if acquirefn:
1020 1002 acquirefn()
1021 1003 return l
1022 1004
1023 1005 def _afterlock(self, callback):
1024 1006 """add a callback to the current repository lock.
1025 1007
1026 1008 The callback will be executed on lock release."""
1027 1009 l = self._lockref and self._lockref()
1028 1010 if l:
1029 1011 l.postrelease.append(callback)
1030 1012 else:
1031 1013 callback()
1032 1014
1033 1015 def lock(self, wait=True):
1034 1016 '''Lock the repository store (.hg/store) and return a weak reference
1035 1017 to the lock. Use this before modifying the store (e.g. committing or
1036 1018 stripping). If you are opening a transaction, get a lock as well.'''
1037 1019 l = self._lockref and self._lockref()
1038 1020 if l is not None and l.held:
1039 1021 l.lock()
1040 1022 return l
1041 1023
1042 1024 def unlock():
1043 1025 self.store.write()
1044 1026 if hasunfilteredcache(self, '_phasecache'):
1045 1027 self._phasecache.write()
1046 1028 for k, ce in self._filecache.items():
1047 1029 if k == 'dirstate' or k not in self.__dict__:
1048 1030 continue
1049 1031 ce.refresh()
1050 1032
1051 1033 l = self._lock(self.svfs, "lock", wait, unlock,
1052 1034 self.invalidate, _('repository %s') % self.origroot)
1053 1035 self._lockref = weakref.ref(l)
1054 1036 return l
1055 1037
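# A minimal sketch of the locking discipline, mirroring recover() and
# rollback() above:
#
#     l = repo.lock()   # store lock; take wlock() first if you need both
#     try:
#         pass          # modify the store
#     finally:
#         l.release()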
1056 1038 def wlock(self, wait=True):
1057 1039 '''Lock the non-store parts of the repository (everything under
1058 1040 .hg except .hg/store) and return a weak reference to the lock.
1059 1041 Use this before modifying files in .hg.'''
1060 1042 l = self._wlockref and self._wlockref()
1061 1043 if l is not None and l.held:
1062 1044 l.lock()
1063 1045 return l
1064 1046
1065 1047 def unlock():
1066 1048 self.dirstate.write()
1067 1049 self._filecache['dirstate'].refresh()
1068 1050
1069 1051 l = self._lock(self.vfs, "wlock", wait, unlock,
1070 1052 self.invalidatedirstate, _('working directory of %s') %
1071 1053 self.origroot)
1072 1054 self._wlockref = weakref.ref(l)
1073 1055 return l
1074 1056
1075 1057 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1076 1058 """
1077 1059 commit an individual file as part of a larger transaction
1078 1060 """
1079 1061
1080 1062 fname = fctx.path()
1081 1063 text = fctx.data()
1082 1064 flog = self.file(fname)
1083 1065 fparent1 = manifest1.get(fname, nullid)
1084 1066 fparent2 = fparent2o = manifest2.get(fname, nullid)
1085 1067
1086 1068 meta = {}
1087 1069 copy = fctx.renamed()
1088 1070 if copy and copy[0] != fname:
1089 1071 # Mark the new revision of this file as a copy of another
1090 1072 # file. This copy data will effectively act as a parent
1091 1073 # of this new revision. If this is a merge, the first
1092 1074 # parent will be the nullid (meaning "look up the copy data")
1093 1075 # and the second one will be the other parent. For example:
1094 1076 #
1095 1077 # 0 --- 1 --- 3 rev1 changes file foo
1096 1078 # \ / rev2 renames foo to bar and changes it
1097 1079 # \- 2 -/ rev3 should have bar with all changes and
1098 1080 # should record that bar descends from
1099 1081 # bar in rev2 and foo in rev1
1100 1082 #
1101 1083 # this allows this merge to succeed:
1102 1084 #
1103 1085 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1104 1086 # \ / merging rev3 and rev4 should use bar@rev2
1105 1087 # \- 2 --- 4 as the merge base
1106 1088 #
1107 1089
1108 1090 cfname = copy[0]
1109 1091 crev = manifest1.get(cfname)
1110 1092 newfparent = fparent2
1111 1093
1112 1094 if manifest2: # branch merge
1113 1095 if fparent2 == nullid or crev is None: # copied on remote side
1114 1096 if cfname in manifest2:
1115 1097 crev = manifest2[cfname]
1116 1098 newfparent = fparent1
1117 1099
1118 1100 # find source in nearest ancestor if we've lost track
1119 1101 if not crev:
1120 1102 self.ui.debug(" %s: searching for copy revision for %s\n" %
1121 1103 (fname, cfname))
1122 1104 for ancestor in self[None].ancestors():
1123 1105 if cfname in ancestor:
1124 1106 crev = ancestor[cfname].filenode()
1125 1107 break
1126 1108
1127 1109 if crev:
1128 1110 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1129 1111 meta["copy"] = cfname
1130 1112 meta["copyrev"] = hex(crev)
1131 1113 fparent1, fparent2 = nullid, newfparent
1132 1114 else:
1133 1115 self.ui.warn(_("warning: can't find ancestor for '%s' "
1134 1116 "copied from '%s'!\n") % (fname, cfname))
1135 1117
1136 1118 elif fparent2 != nullid:
1137 1119 # is one parent an ancestor of the other?
1138 1120 fparentancestor = flog.ancestor(fparent1, fparent2)
1139 1121 if fparentancestor == fparent1:
1140 1122 fparent1, fparent2 = fparent2, nullid
1141 1123 elif fparentancestor == fparent2:
1142 1124 fparent2 = nullid
1143 1125
1144 1126 # is the file changed?
1145 1127 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1146 1128 changelist.append(fname)
1147 1129 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1148 1130
1149 1131 # are just the flags changed during merge?
1150 1132 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1151 1133 changelist.append(fname)
1152 1134
1153 1135 return fparent1
1154 1136
1155 1137 @unfilteredmethod
1156 1138 def commit(self, text="", user=None, date=None, match=None, force=False,
1157 1139 editor=False, extra={}):
1158 1140 """Add a new revision to current repository.
1159 1141
1160 1142 Revision information is gathered from the working directory,
1161 1143 match can be used to filter the committed files. If editor is
1162 1144 supplied, it is called to get a commit message.
1163 1145 """
1164 1146
1165 1147 def fail(f, msg):
1166 1148 raise util.Abort('%s: %s' % (f, msg))
1167 1149
1168 1150 if not match:
1169 1151 match = matchmod.always(self.root, '')
1170 1152
1171 1153 if not force:
1172 1154 vdirs = []
1173 1155 match.explicitdir = vdirs.append
1174 1156 match.bad = fail
1175 1157
1176 1158 wlock = self.wlock()
1177 1159 try:
1178 1160 wctx = self[None]
1179 1161 merge = len(wctx.parents()) > 1
1180 1162
1181 1163 if (not force and merge and match and
1182 1164 (match.files() or match.anypats())):
1183 1165 raise util.Abort(_('cannot partially commit a merge '
1184 1166 '(do not specify files or patterns)'))
1185 1167
1186 1168 changes = self.status(match=match, clean=force)
1187 1169 if force:
1188 1170 changes[0].extend(changes[6]) # mq may commit unchanged files
1189 1171
1190 1172 # check subrepos
1191 1173 subs = []
1192 1174 commitsubs = set()
1193 1175 newstate = wctx.substate.copy()
1194 1176 # only manage subrepos and .hgsubstate if .hgsub is present
1195 1177 if '.hgsub' in wctx:
1196 1178 # we'll decide whether to track this ourselves, thanks
1197 1179 if '.hgsubstate' in changes[0]:
1198 1180 changes[0].remove('.hgsubstate')
1199 1181 if '.hgsubstate' in changes[2]:
1200 1182 changes[2].remove('.hgsubstate')
1201 1183
1202 1184 # compare current state to last committed state
1203 1185 # build new substate based on last committed state
1204 1186 oldstate = wctx.p1().substate
1205 1187 for s in sorted(newstate.keys()):
1206 1188 if not match(s):
1207 1189 # ignore working copy, use old state if present
1208 1190 if s in oldstate:
1209 1191 newstate[s] = oldstate[s]
1210 1192 continue
1211 1193 if not force:
1212 1194 raise util.Abort(
1213 1195 _("commit with new subrepo %s excluded") % s)
1214 1196 if wctx.sub(s).dirty(True):
1215 1197 if not self.ui.configbool('ui', 'commitsubrepos'):
1216 1198 raise util.Abort(
1217 1199 _("uncommitted changes in subrepo %s") % s,
1218 1200 hint=_("use --subrepos for recursive commit"))
1219 1201 subs.append(s)
1220 1202 commitsubs.add(s)
1221 1203 else:
1222 1204 bs = wctx.sub(s).basestate()
1223 1205 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1224 1206 if oldstate.get(s, (None, None, None))[1] != bs:
1225 1207 subs.append(s)
1226 1208
1227 1209 # check for removed subrepos
1228 1210 for p in wctx.parents():
1229 1211 r = [s for s in p.substate if s not in newstate]
1230 1212 subs += [s for s in r if match(s)]
1231 1213 if subs:
1232 1214 if (not match('.hgsub') and
1233 1215 '.hgsub' in (wctx.modified() + wctx.added())):
1234 1216 raise util.Abort(
1235 1217 _("can't commit subrepos without .hgsub"))
1236 1218 changes[0].insert(0, '.hgsubstate')
1237 1219
1238 1220 elif '.hgsub' in changes[2]:
1239 1221 # clean up .hgsubstate when .hgsub is removed
1240 1222 if ('.hgsubstate' in wctx and
1241 1223 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1242 1224 changes[2].insert(0, '.hgsubstate')
1243 1225
1244 1226 # make sure all explicit patterns are matched
1245 1227 if not force and match.files():
1246 1228 matched = set(changes[0] + changes[1] + changes[2])
1247 1229
1248 1230 for f in match.files():
1249 1231 f = self.dirstate.normalize(f)
1250 1232 if f == '.' or f in matched or f in wctx.substate:
1251 1233 continue
1252 1234 if f in changes[3]: # missing
1253 1235 fail(f, _('file not found!'))
1254 1236 if f in vdirs: # visited directory
1255 1237 d = f + '/'
1256 1238 for mf in matched:
1257 1239 if mf.startswith(d):
1258 1240 break
1259 1241 else:
1260 1242 fail(f, _("no match under directory!"))
1261 1243 elif f not in self.dirstate:
1262 1244 fail(f, _("file not tracked!"))
1263 1245
1264 1246 cctx = context.workingctx(self, text, user, date, extra, changes)
1265 1247
1266 1248 if (not force and not extra.get("close") and not merge
1267 1249 and not cctx.files()
1268 1250 and wctx.branch() == wctx.p1().branch()):
1269 1251 return None
1270 1252
1271 1253 if merge and cctx.deleted():
1272 1254 raise util.Abort(_("cannot commit merge with missing files"))
1273 1255
1274 1256 ms = mergemod.mergestate(self)
1275 1257 for f in changes[0]:
1276 1258 if f in ms and ms[f] == 'u':
1277 1259 raise util.Abort(_("unresolved merge conflicts "
1278 1260 "(see hg help resolve)"))
1279 1261
1280 1262 if editor:
1281 1263 cctx._text = editor(self, cctx, subs)
1282 1264 edited = (text != cctx._text)
1283 1265
1284 1266 # commit subs and write new state
1285 1267 if subs:
1286 1268 for s in sorted(commitsubs):
1287 1269 sub = wctx.sub(s)
1288 1270 self.ui.status(_('committing subrepository %s\n') %
1289 1271 subrepo.subrelpath(sub))
1290 1272 sr = sub.commit(cctx._text, user, date)
1291 1273 newstate[s] = (newstate[s][0], sr)
1292 1274 subrepo.writestate(self, newstate)
1293 1275
1294 1276 # Save commit message in case this transaction gets rolled back
1295 1277 # (e.g. by a pretxncommit hook). Leave the content alone on
1296 1278 # the assumption that the user will use the same editor again.
1297 1279 msgfn = self.savecommitmessage(cctx._text)
1298 1280
1299 1281 p1, p2 = self.dirstate.parents()
1300 1282 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1301 1283 try:
1302 1284 self.hook("precommit", throw=True, parent1=hookp1,
1303 1285 parent2=hookp2)
1304 1286 ret = self.commitctx(cctx, True)
1305 1287 except: # re-raises
1306 1288 if edited:
1307 1289 self.ui.write(
1308 1290 _('note: commit message saved in %s\n') % msgfn)
1309 1291 raise
1310 1292
1311 1293 # update bookmarks, dirstate and mergestate
1312 1294 bookmarks.update(self, [p1, p2], ret)
1313 1295 cctx.markcommitted(ret)
1314 1296 ms.reset()
1315 1297 finally:
1316 1298 wlock.release()
1317 1299
1318 1300 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1319 1301 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1320 1302 self._afterlock(commithook)
1321 1303 return ret
1322 1304
1323 1305 @unfilteredmethod
1324 1306 def commitctx(self, ctx, error=False):
1325 1307 """Add a new revision to current repository.
1326 1308 Revision information is passed via the context argument.
1327 1309 """
1328 1310
1329 1311 tr = lock = None
1330 1312 removed = list(ctx.removed())
1331 1313 p1, p2 = ctx.p1(), ctx.p2()
1332 1314 user = ctx.user()
1333 1315
1334 1316 lock = self.lock()
1335 1317 try:
1336 1318 tr = self.transaction("commit")
1337 1319 trp = weakref.proxy(tr)
1338 1320
1339 1321 if ctx.files():
1340 1322 m1 = p1.manifest().copy()
1341 1323 m2 = p2.manifest()
1342 1324
1343 1325 # check in files
1344 1326 new = {}
1345 1327 changed = []
1346 1328 linkrev = len(self)
1347 1329 for f in sorted(ctx.modified() + ctx.added()):
1348 1330 self.ui.note(f + "\n")
1349 1331 try:
1350 1332 fctx = ctx[f]
1351 1333 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1352 1334 changed)
1353 1335 m1.set(f, fctx.flags())
1354 1336 except OSError, inst:
1355 1337 self.ui.warn(_("trouble committing %s!\n") % f)
1356 1338 raise
1357 1339 except IOError, inst:
1358 1340 errcode = getattr(inst, 'errno', errno.ENOENT)
1359 1341 if error or errcode and errcode != errno.ENOENT:
1360 1342 self.ui.warn(_("trouble committing %s!\n") % f)
1361 1343 raise
1362 1344 else:
1363 1345 removed.append(f)
1364 1346
1365 1347 # update manifest
1366 1348 m1.update(new)
1367 1349 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1368 1350 drop = [f for f in removed if f in m1]
1369 1351 for f in drop:
1370 1352 del m1[f]
1371 1353 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1372 1354 p2.manifestnode(), (new, drop))
1373 1355 files = changed + removed
1374 1356 else:
1375 1357 mn = p1.manifestnode()
1376 1358 files = []
1377 1359
1378 1360 # update changelog
1379 1361 self.changelog.delayupdate()
1380 1362 n = self.changelog.add(mn, files, ctx.description(),
1381 1363 trp, p1.node(), p2.node(),
1382 1364 user, ctx.date(), ctx.extra().copy())
1383 1365 p = lambda: self.changelog.writepending() and self.root or ""
1384 1366 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1385 1367 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1386 1368 parent2=xp2, pending=p)
1387 1369 self.changelog.finalize(trp)
1388 1370 # set the new commit in its proper phase
1389 1371 targetphase = subrepo.newcommitphase(self.ui, ctx)
1390 1372 if targetphase:
1391 1373 # retracting the boundary does not alter parent changesets.
1392 1374 # if a parent has a higher phase, the resulting phase will
1393 1375 # be compliant anyway
1394 1376 #
1395 1377 # if minimal phase was 0 we don't need to retract anything
1396 1378 phases.retractboundary(self, targetphase, [n])
1397 1379 tr.close()
1398 1380 branchmap.updatecache(self.filtered('served'))
1399 1381 return n
1400 1382 finally:
1401 1383 if tr:
1402 1384 tr.release()
1403 1385 lock.release()
1404 1386
1405 1387 @unfilteredmethod
1406 1388 def destroying(self):
1407 1389 '''Inform the repository that nodes are about to be destroyed.
1408 1390 Intended for use by strip and rollback, so there's a common
1409 1391 place for anything that has to be done before destroying history.
1410 1392
1411 1393 This is mostly useful for saving state that is in memory and waiting
1412 1394 to be flushed when the current lock is released. Because a call to
1413 1395 destroyed is imminent, the repo will be invalidated, causing those
1414 1396 changes to either stay in memory (waiting for the next unlock) or
1415 1397 vanish completely.
1416 1398 '''
1417 1399 # When using the same lock to commit and strip, the phasecache is left
1418 1400 # dirty after committing. Then when we strip, the repo is invalidated,
1419 1401 # causing those changes to disappear.
1420 1402 if '_phasecache' in vars(self):
1421 1403 self._phasecache.write()
1422 1404
1423 1405 @unfilteredmethod
1424 1406 def destroyed(self):
1425 1407 '''Inform the repository that nodes have been destroyed.
1426 1408 Intended for use by strip and rollback, so there's a common
1427 1409 place for anything that has to be done after destroying history.
1428 1410 '''
1429 1411 # When one tries to:
1430 1412 # 1) destroy nodes thus calling this method (e.g. strip)
1431 1413 # 2) use phasecache somewhere (e.g. commit)
1432 1414 #
1433 1415 # then 2) will fail because the phasecache contains nodes that were
1434 1416 # removed. We can either remove phasecache from the filecache,
1435 1417 # causing it to reload next time it is accessed, or simply filter
1436 1418 # the removed nodes now and write the updated cache.
1437 1419 self._phasecache.filterunknown(self)
1438 1420 self._phasecache.write()
1439 1421
1440 1422 # update the 'served' branch cache to help read-only server processes
1441 1423 # Thanks to branchcache collaboration this is done from the nearest
1442 1424 # filtered subset and it is expected to be fast.
1443 1425 branchmap.updatecache(self.filtered('served'))
1444 1426
1445 1427 # Ensure the persistent tag cache is updated. Doing it now
1446 1428 # means that the tag cache only has to worry about destroyed
1447 1429 # heads immediately after a strip/rollback. That in turn
1448 1430 # guarantees that "cachetip == currenttip" (comparing both rev
1449 1431 # and node) always means no nodes have been added or destroyed.
1450 1432
1451 1433 # XXX this is suboptimal when qrefresh'ing: we strip the current
1452 1434 # head, refresh the tag cache, then immediately add a new head.
1453 1435 # But I think doing it this way is necessary for the "instant
1454 1436 # tag cache retrieval" case to work.
1455 1437 self.invalidate()
1456 1438
1457 1439 def walk(self, match, node=None):
1458 1440 '''
1459 1441 walk recursively through the directory tree or a given
1460 1442 changeset, finding all files matched by the match
1461 1443 function
1462 1444 '''
1463 1445 return self[node].walk(match)
1464 1446
1465 1447 def status(self, node1='.', node2=None, match=None,
1466 1448 ignored=False, clean=False, unknown=False,
1467 1449 listsubrepos=False):
1468 1450 """return status of files between two nodes or node and working
1469 1451 directory.
1470 1452
1471 1453 If node1 is None, use the first dirstate parent instead.
1472 1454 If node2 is None, compare node1 with working directory.
1473 1455 """
1474 1456
1475 1457 def mfmatches(ctx):
1476 1458 mf = ctx.manifest().copy()
1477 1459 if match.always():
1478 1460 return mf
1479 1461 for fn in mf.keys():
1480 1462 if not match(fn):
1481 1463 del mf[fn]
1482 1464 return mf
1483 1465
1484 1466 ctx1 = self[node1]
1485 1467 ctx2 = self[node2]
1486 1468
1487 1469 working = ctx2.rev() is None
1488 1470 parentworking = working and ctx1 == self['.']
1489 1471 match = match or matchmod.always(self.root, self.getcwd())
1490 1472 listignored, listclean, listunknown = ignored, clean, unknown
1491 1473
1492 1474 # load earliest manifest first for caching reasons
1493 1475 if not working and ctx2.rev() < ctx1.rev():
1494 1476 ctx2.manifest()
1495 1477
1496 1478 if not parentworking:
1497 1479 def bad(f, msg):
1498 1480 # 'f' may be a directory pattern from 'match.files()',
1499 1481 # so 'f not in ctx1' is not enough
1500 1482 if f not in ctx1 and f not in ctx1.dirs():
1501 1483 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1502 1484 match.bad = bad
1503 1485
1504 1486 if working: # we need to scan the working dir
1505 1487 subrepos = []
1506 1488 if '.hgsub' in self.dirstate:
1507 1489 subrepos = sorted(ctx2.substate)
1508 1490 s = self.dirstate.status(match, subrepos, listignored,
1509 1491 listclean, listunknown)
1510 1492 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1511 1493
1512 1494 # check for any possibly clean files
1513 1495 if parentworking and cmp:
1514 1496 fixup = []
1515 1497 # do a full compare of any files that might have changed
1516 1498 for f in sorted(cmp):
1517 1499 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1518 1500 or ctx1[f].cmp(ctx2[f])):
1519 1501 modified.append(f)
1520 1502 else:
1521 1503 fixup.append(f)
1522 1504
1523 1505 # update dirstate for files that are actually clean
1524 1506 if fixup:
1525 1507 if listclean:
1526 1508 clean += fixup
1527 1509
1528 1510 try:
1529 1511 # updating the dirstate is optional
1530 1512 # so we don't wait on the lock
1531 1513 wlock = self.wlock(False)
1532 1514 try:
1533 1515 for f in fixup:
1534 1516 self.dirstate.normal(f)
1535 1517 finally:
1536 1518 wlock.release()
1537 1519 except error.LockError:
1538 1520 pass
1539 1521
1540 1522 if not parentworking:
1541 1523 mf1 = mfmatches(ctx1)
1542 1524 if working:
1543 1525 # we are comparing working dir against non-parent
1544 1526 # generate a pseudo-manifest for the working dir
1545 1527 mf2 = mfmatches(self['.'])
1546 1528 for f in cmp + modified + added:
1547 1529 mf2[f] = None
1548 1530 mf2.set(f, ctx2.flags(f))
1549 1531 for f in removed:
1550 1532 if f in mf2:
1551 1533 del mf2[f]
1552 1534 else:
1553 1535 # we are comparing two revisions
1554 1536 deleted, unknown, ignored = [], [], []
1555 1537 mf2 = mfmatches(ctx2)
1556 1538
1557 1539 modified, added, clean = [], [], []
1558 1540 withflags = mf1.withflags() | mf2.withflags()
1559 1541 for fn, mf2node in mf2.iteritems():
1560 1542 if fn in mf1:
1561 1543 if (fn not in deleted and
1562 1544 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1563 1545 (mf1[fn] != mf2node and
1564 1546 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1565 1547 modified.append(fn)
1566 1548 elif listclean:
1567 1549 clean.append(fn)
1568 1550 del mf1[fn]
1569 1551 elif fn not in deleted:
1570 1552 added.append(fn)
1571 1553 removed = mf1.keys()
1572 1554
1573 1555 if working and modified and not self.dirstate._checklink:
1574 1556 # Symlink placeholders may get non-symlink-like contents
1575 1557 # via user error or dereferencing by NFS or Samba servers,
1576 1558 # so we filter out any placeholders that don't look like a
1577 1559 # symlink
1578 1560 sane = []
1579 1561 for f in modified:
1580 1562 if ctx2.flags(f) == 'l':
1581 1563 d = ctx2[f].data()
1582 1564 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1583 1565 self.ui.debug('ignoring suspect symlink placeholder'
1584 1566 ' "%s"\n' % f)
1585 1567 continue
1586 1568 sane.append(f)
1587 1569 modified = sane
1588 1570
1589 1571 r = modified, added, removed, deleted, unknown, ignored, clean
1590 1572
1591 1573 if listsubrepos:
1592 1574 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1593 1575 if working:
1594 1576 rev2 = None
1595 1577 else:
1596 1578 rev2 = ctx2.substate[subpath][1]
1597 1579 try:
1598 1580 submatch = matchmod.narrowmatcher(subpath, match)
1599 1581 s = sub.status(rev2, match=submatch, ignored=listignored,
1600 1582 clean=listclean, unknown=listunknown,
1601 1583 listsubrepos=True)
1602 1584 for rfiles, sfiles in zip(r, s):
1603 1585 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1604 1586 except error.LookupError:
1605 1587 self.ui.status(_("skipping missing subrepository: %s\n")
1606 1588 % subpath)
1607 1589
1608 1590 for l in r:
1609 1591 l.sort()
1610 1592 return r
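
A note on the status() API above: it returns a 7-tuple of sorted lists. A minimal usage sketch in the Python 2 idiom of this file ('repo' is assumed to be an already-opened localrepository; not part of the original source):

    # unpack the seven lists returned by localrepository.status()
    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, clean=True, unknown=True)
    for f in modified:
        print 'M', f
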
1611 1593
1612 1594 def heads(self, start=None):
1613 1595 heads = self.changelog.heads(start)
1614 1596 # sort the output in rev descending order
1615 1597 return sorted(heads, key=self.changelog.rev, reverse=True)
1616 1598
1617 1599 def branchheads(self, branch=None, start=None, closed=False):
1618 1600 '''return a (possibly filtered) list of heads for the given branch
1619 1601
1620 1602 Heads are returned in topological order, from newest to oldest.
1621 1603 If branch is None, use the dirstate branch.
1622 1604 If start is not None, return only heads reachable from start.
1623 1605 If closed is True, return heads that are marked as closed as well.
1624 1606 '''
1625 1607 if branch is None:
1626 1608 branch = self[None].branch()
1627 1609 branches = self.branchmap()
1628 1610 if branch not in branches:
1629 1611 return []
1630 1612 # the cache returns heads ordered lowest to highest
1631 1613 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1632 1614 if start is not None:
1633 1615 # filter out the heads that cannot be reached from startrev
1634 1616 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1635 1617 bheads = [h for h in bheads if h in fbheads]
1636 1618 return bheads
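
As a usage sketch of branchheads() (illustrative only; 'repo' is assumed to be an open localrepository), with no arguments it uses the dirstate branch and omits closed heads:

    # open heads of the current branch, newest first
    for node in repo.branchheads():
        print repo[node].rev(), repo[node].branch()
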
1637 1619
1638 1620 def branches(self, nodes):
1639 1621 if not nodes:
1640 1622 nodes = [self.changelog.tip()]
1641 1623 b = []
1642 1624 for n in nodes:
1643 1625 t = n
1644 1626 while True:
1645 1627 p = self.changelog.parents(n)
1646 1628 if p[1] != nullid or p[0] == nullid:
1647 1629 b.append((t, n, p[0], p[1]))
1648 1630 break
1649 1631 n = p[0]
1650 1632 return b
1651 1633
1652 1634 def between(self, pairs):
1653 1635 r = []
1654 1636
1655 1637 for top, bottom in pairs:
1656 1638 n, l, i = top, [], 0
1657 1639 f = 1
1658 1640
1659 1641 while n != bottom and n != nullid:
1660 1642 p = self.changelog.parents(n)[0]
1661 1643 if i == f:
1662 1644 l.append(n)
1663 1645 f = f * 2
1664 1646 n = p
1665 1647 i += 1
1666 1648
1667 1649 r.append(l)
1668 1650
1669 1651 return r
1670 1652
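
between() above samples the first-parent chain at exponentially growing distances (1, 2, 4, 8, ...) from each top node, which is what the legacy discovery protocol expects. A standalone sketch of the same sampling, with a plain parent() callable standing in for the changelog (hypothetical helper, not part of this file):

    def sample_chain(parent, top, bottom):
        # collect nodes at distances 1, 2, 4, ... from top, walking first
        # parents until bottom is reached (parent() returns None at the root)
        n, out, i, f = top, [], 0, 1
        while n != bottom and n is not None:
            p = parent(n)
            if i == f:
                out.append(n)
                f *= 2
            n = p
            i += 1
        return out
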
1671 1653 def pull(self, remote, heads=None, force=False):
1672 1654 if remote.local():
1673 1655 missing = set(remote.requirements) - self.supported
1674 1656 if missing:
1675 1657 msg = _("required features are not"
1676 1658 " supported in the destination:"
1677 1659 " %s") % (', '.join(sorted(missing)))
1678 1660 raise util.Abort(msg)
1679 1661
1680 1662         # don't open a transaction for nothing, or you break future useful
1681 1663         # rollback calls
1682 1664 tr = None
1683 1665 trname = 'pull\n' + util.hidepassword(remote.url())
1684 1666 lock = self.lock()
1685 1667 try:
1686 1668 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1687 1669 force=force)
1688 1670 common, fetch, rheads = tmp
1689 1671 if not fetch:
1690 1672 self.ui.status(_("no changes found\n"))
1691 1673 added = []
1692 1674 result = 0
1693 1675 else:
1694 1676 tr = self.transaction(trname)
1695 1677 if heads is None and list(common) == [nullid]:
1696 1678 self.ui.status(_("requesting all changes\n"))
1697 1679 elif heads is None and remote.capable('changegroupsubset'):
1698 1680 # issue1320, avoid a race if remote changed after discovery
1699 1681 heads = rheads
1700 1682
1701 1683 if remote.capable('getbundle'):
1702 1684 # TODO: get bundlecaps from remote
1703 1685 cg = remote.getbundle('pull', common=common,
1704 1686 heads=heads or rheads)
1705 1687 elif heads is None:
1706 1688 cg = remote.changegroup(fetch, 'pull')
1707 1689 elif not remote.capable('changegroupsubset'):
1708 1690 raise util.Abort(_("partial pull cannot be done because "
1709 1691 "other repository doesn't support "
1710 1692 "changegroupsubset."))
1711 1693 else:
1712 1694 cg = remote.changegroupsubset(fetch, heads, 'pull')
1713 1695                 # we use the unfiltered changelog here because hidden revisions
1714 1696                 # must be taken into account for phase synchronization. They may
1715 1697                 # become public and visible again.
1716 1698 cl = self.unfiltered().changelog
1717 1699 clstart = len(cl)
1718 1700 result = self.addchangegroup(cg, 'pull', remote.url())
1719 1701 clend = len(cl)
1720 1702 added = [cl.node(r) for r in xrange(clstart, clend)]
1721 1703
1722 1704 # compute target subset
1723 1705 if heads is None:
1724 1706                 # We pulled everything possible
1725 1707 # sync on everything common
1726 1708 subset = common + added
1727 1709 else:
1728 1710 # We pulled a specific subset
1729 1711 # sync on this subset
1730 1712 subset = heads
1731 1713
1732 1714 # Get remote phases data from remote
1733 1715 remotephases = remote.listkeys('phases')
1734 1716 publishing = bool(remotephases.get('publishing', False))
1735 1717 if remotephases and not publishing:
1736 1718                 # remote is new and non-publishing
1737 1719 pheads, _dr = phases.analyzeremotephases(self, subset,
1738 1720 remotephases)
1739 1721 phases.advanceboundary(self, phases.public, pheads)
1740 1722 phases.advanceboundary(self, phases.draft, subset)
1741 1723 else:
1742 1724                 # Remote is old or publishing; all common changesets
1743 1725                 # should be seen as public
1744 1726 phases.advanceboundary(self, phases.public, subset)
1745 1727
1746 1728 def gettransaction():
1747 1729 if tr is None:
1748 1730 return self.transaction(trname)
1749 1731 return tr
1750 1732
1751 1733 obstr = obsolete.syncpull(self, remote, gettransaction)
1752 1734 if obstr is not None:
1753 1735 tr = obstr
1754 1736
1755 1737 if tr is not None:
1756 1738 tr.close()
1757 1739 finally:
1758 1740 if tr is not None:
1759 1741 tr.release()
1760 1742 lock.release()
1761 1743
1762 1744 return result
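
A usage sketch for pull() under the API of this era (module names are real; the peer URL is a placeholder):

    from mercurial import ui as uimod, hg
    myui = uimod.ui()
    repo = hg.repository(myui, '.')
    other = hg.peer(myui, {}, 'http://example.com/repo')
    result = repo.pull(other)     # 0 when no changes were found
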
1763 1745
1764 1746 def checkpush(self, force, revs):
1765 1747 """Extensions can override this function if additional checks have
1766 1748 to be performed before pushing, or call it if they override push
1767 1749 command.
1768 1750 """
1769 1751 pass
1770 1752
1771 1753 def push(self, remote, force=False, revs=None, newbranch=False):
1772 1754 '''Push outgoing changesets (limited by revs) from the current
1773 1755 repository to remote. Return an integer:
1774 1756 - None means nothing to push
1775 1757 - 0 means HTTP error
1776 1758 - 1 means we pushed and remote head count is unchanged *or*
1777 1759 we have outgoing changesets but refused to push
1778 1760 - other values as described by addchangegroup()
1779 1761 '''
1780 1762 if remote.local():
1781 1763 missing = set(self.requirements) - remote.local().supported
1782 1764 if missing:
1783 1765 msg = _("required features are not"
1784 1766 " supported in the destination:"
1785 1767 " %s") % (', '.join(sorted(missing)))
1786 1768 raise util.Abort(msg)
1787 1769
1788 1770 # there are two ways to push to remote repo:
1789 1771 #
1790 1772 # addchangegroup assumes local user can lock remote
1791 1773 # repo (local filesystem, old ssh servers).
1792 1774 #
1793 1775 # unbundle assumes local user cannot lock remote repo (new ssh
1794 1776 # servers, http servers).
1795 1777
1796 1778 if not remote.canpush():
1797 1779 raise util.Abort(_("destination does not support push"))
1798 1780 unfi = self.unfiltered()
1799 1781 def localphasemove(nodes, phase=phases.public):
1800 1782 """move <nodes> to <phase> in the local source repo"""
1801 1783 if locallock is not None:
1802 1784 phases.advanceboundary(self, phase, nodes)
1803 1785 else:
1804 1786 # repo is not locked, do not change any phases!
1805 1787                 # Inform the user that phases should have been moved when
1806 1788                 # applicable.
1807 1789 actualmoves = [n for n in nodes if phase < self[n].phase()]
1808 1790 phasestr = phases.phasenames[phase]
1809 1791 if actualmoves:
1810 1792 self.ui.status(_('cannot lock source repo, skipping local'
1811 1793 ' %s phase update\n') % phasestr)
1812 1794 # get local lock as we might write phase data
1813 1795 locallock = None
1814 1796 try:
1815 1797 locallock = self.lock()
1816 1798 except IOError, err:
1817 1799 if err.errno != errno.EACCES:
1818 1800 raise
1819 1801 # source repo cannot be locked.
1820 1802 # We do not abort the push, but just disable the local phase
1821 1803 # synchronisation.
1822 1804 msg = 'cannot lock source repository: %s\n' % err
1823 1805 self.ui.debug(msg)
1824 1806 try:
1825 1807 self.checkpush(force, revs)
1826 1808 lock = None
1827 1809 unbundle = remote.capable('unbundle')
1828 1810 if not unbundle:
1829 1811 lock = remote.lock()
1830 1812 try:
1831 1813 # discovery
1832 1814 fci = discovery.findcommonincoming
1833 1815 commoninc = fci(unfi, remote, force=force)
1834 1816 common, inc, remoteheads = commoninc
1835 1817 fco = discovery.findcommonoutgoing
1836 1818 outgoing = fco(unfi, remote, onlyheads=revs,
1837 1819 commoninc=commoninc, force=force)
1838 1820
1839 1821
1840 1822 if not outgoing.missing:
1841 1823 # nothing to push
1842 1824 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1843 1825 ret = None
1844 1826 else:
1845 1827 # something to push
1846 1828 if not force:
1847 1829                     # if self.obsstore is empty --> no obsolete markers,
1848 1830                     # so we can skip the iteration
1849 1831 if unfi.obsstore:
1850 1832                         # these messages are defined here for the 80-char limit
1851 1833 mso = _("push includes obsolete changeset: %s!")
1852 1834 mst = "push includes %s changeset: %s!"
1853 1835 # plain versions for i18n tool to detect them
1854 1836 _("push includes unstable changeset: %s!")
1855 1837 _("push includes bumped changeset: %s!")
1856 1838 _("push includes divergent changeset: %s!")
1857 1839                         # If we are pushing and there is at least one
1858 1840                         # obsolete or unstable changeset in missing, at
1859 1841                         # least one of the missing heads will be obsolete or
1860 1842                         # unstable. So checking heads only is OK.
1861 1843 for node in outgoing.missingheads:
1862 1844 ctx = unfi[node]
1863 1845 if ctx.obsolete():
1864 1846 raise util.Abort(mso % ctx)
1865 1847 elif ctx.troubled():
1866 1848 raise util.Abort(_(mst)
1867 1849 % (ctx.troubles()[0],
1868 1850 ctx))
1869 1851 newbm = self.ui.configlist('bookmarks', 'pushing')
1870 1852 discovery.checkheads(unfi, remote, outgoing,
1871 1853 remoteheads, newbranch,
1872 1854 bool(inc), newbm)
1873 1855
1874 1856 # TODO: get bundlecaps from remote
1875 1857 bundlecaps = None
1876 1858 # create a changegroup from local
1877 1859 if revs is None and not outgoing.excluded:
1878 1860 # push everything,
1879 1861 # use the fast path, no race possible on push
1880 1862 bundler = changegroup.bundle10(self, bundlecaps)
1881 1863 cg = self._changegroupsubset(outgoing,
1882 1864 bundler,
1883 1865 'push',
1884 1866 fastpath=True)
1885 1867 else:
1886 1868 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1887 1869
1888 1870 # apply changegroup to remote
1889 1871 if unbundle:
1890 1872                     # the local repo finds heads on the server and figures out
1891 1873                     # which revs it must push. Once the revs are transferred, if
1892 1874                     # the server finds it has different heads (someone else won
1893 1875                     # the commit/push race), the server aborts.
1894 1876 if force:
1895 1877 remoteheads = ['force']
1896 1878 # ssh: return remote's addchangegroup()
1897 1879 # http: return remote's addchangegroup() or 0 for error
1898 1880 ret = remote.unbundle(cg, remoteheads, 'push')
1899 1881 else:
1900 1882 # we return an integer indicating remote head count
1901 1883 # change
1902 1884 ret = remote.addchangegroup(cg, 'push', self.url())
1903 1885
1904 1886 if ret:
1905 1887                     # push succeeded, synchronize the target of the push
1906 1888 cheads = outgoing.missingheads
1907 1889 elif revs is None:
1908 1890                     # All-out push failed; synchronize all common changesets
1909 1891 cheads = outgoing.commonheads
1910 1892 else:
1911 1893 # I want cheads = heads(::missingheads and ::commonheads)
1912 1894 # (missingheads is revs with secret changeset filtered out)
1913 1895 #
1914 1896 # This can be expressed as:
1915 1897             # cheads = ((missingheads and ::commonheads)
1916 1898             #           + (commonheads and ::missingheads))
1917 1899             #
1918 1900 #
1919 1901 # while trying to push we already computed the following:
1920 1902 # common = (::commonheads)
1921 1903 # missing = ((commonheads::missingheads) - commonheads)
1922 1904 #
1923 1905 # We can pick:
1924 1906 # * missingheads part of common (::commonheads)
1925 1907 common = set(outgoing.common)
1926 1908 cheads = [node for node in revs if node in common]
1927 1909 # and
1928 1910 # * commonheads parents on missing
1929 1911 revset = unfi.set('%ln and parents(roots(%ln))',
1930 1912 outgoing.commonheads,
1931 1913 outgoing.missing)
1932 1914 cheads.extend(c.node() for c in revset)
1933 1915 # even when we don't push, exchanging phase data is useful
1934 1916 remotephases = remote.listkeys('phases')
1935 1917 if (self.ui.configbool('ui', '_usedassubrepo', False)
1936 1918 and remotephases # server supports phases
1937 1919 and ret is None # nothing was pushed
1938 1920 and remotephases.get('publishing', False)):
1939 1921                 # When:
1940 1922                 # - this is a subrepo push
1941 1923                 # - and the remote supports phases
1942 1924                 # - and no changeset was pushed
1943 1925                 # - and the remote is publishing
1944 1926                 # we may be in the issue 3871 case!
1945 1927                 # We drop the fine-grained phase synchronisation and simply
1946 1928                 # publish the common heads: they may be draft locally, but the
1947 1929                 # publishing remote already treats them as public.
1948 1930 remotephases = {'publishing': 'True'}
1949 1931 if not remotephases: # old server or public only repo
1950 1932 localphasemove(cheads)
1951 1933 # don't push any phase data as there is nothing to push
1952 1934 else:
1953 1935 ana = phases.analyzeremotephases(self, cheads, remotephases)
1954 1936 pheads, droots = ana
1955 1937 ### Apply remote phase on local
1956 1938 if remotephases.get('publishing', False):
1957 1939 localphasemove(cheads)
1958 1940 else: # publish = False
1959 1941 localphasemove(pheads)
1960 1942 localphasemove(cheads, phases.draft)
1961 1943 ### Apply local phase on remote
1962 1944
1963 1945                 # Get all revs that are draft on the remote but public here.
1964 1946                 # XXX Beware that the revset breaks if droots is not strictly
1965 1947                 # XXX roots; we may want to ensure it is, but that is costly
1966 1948 outdated = unfi.set('heads((%ln::%ln) and public())',
1967 1949 droots, cheads)
1968 1950 for newremotehead in outdated:
1969 1951 r = remote.pushkey('phases',
1970 1952 newremotehead.hex(),
1971 1953 str(phases.draft),
1972 1954 str(phases.public))
1973 1955 if not r:
1974 1956 self.ui.warn(_('updating %s to public failed!\n')
1975 1957 % newremotehead)
1976 1958 self.ui.debug('try to push obsolete markers to remote\n')
1977 1959 obsolete.syncpush(self, remote)
1978 1960 finally:
1979 1961 if lock is not None:
1980 1962 lock.release()
1981 1963 finally:
1982 1964 if locallock is not None:
1983 1965 locallock.release()
1984 1966
1985 1967 bookmarks.updateremote(self.ui, unfi, remote, revs)
1986 1968 return ret
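
Interpreting push()'s return value per the docstring above (a sketch; 'repo' and 'other' as in the earlier pull example):

    ret = repo.push(other)
    if ret is None:
        print 'nothing to push'
    elif ret == 0:
        print 'push failed (HTTP error)'
    else:
        print 'pushed; head-count change encoded as in addchangegroup()'
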
1987 1969
1988 1970 def changegroupinfo(self, nodes, source):
1989 1971 if self.ui.verbose or source == 'bundle':
1990 1972 self.ui.status(_("%d changesets found\n") % len(nodes))
1991 1973 if self.ui.debugflag:
1992 1974 self.ui.debug("list of changesets:\n")
1993 1975 for node in nodes:
1994 1976 self.ui.debug("%s\n" % hex(node))
1995 1977
1996 1978 def changegroupsubset(self, bases, heads, source):
1997 1979 """Compute a changegroup consisting of all the nodes that are
1998 1980 descendants of any of the bases and ancestors of any of the heads.
1999 1981 Return a chunkbuffer object whose read() method will return
2000 1982 successive changegroup chunks.
2001 1983
2002 1984 It is fairly complex as determining which filenodes and which
2003 1985 manifest nodes need to be included for the changeset to be complete
2004 1986 is non-trivial.
2005 1987
2006 1988 Another wrinkle is doing the reverse, figuring out which changeset in
2007 1989 the changegroup a particular filenode or manifestnode belongs to.
2008 1990 """
2009 1991 cl = self.changelog
2010 1992 if not bases:
2011 1993 bases = [nullid]
2012 1994 # TODO: remove call to nodesbetween.
2013 1995 csets, bases, heads = cl.nodesbetween(bases, heads)
2014 1996 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
2015 1997 outgoing = discovery.outgoing(cl, bases, heads)
2016 1998 bundler = changegroup.bundle10(self)
2017 1999 return self._changegroupsubset(outgoing, bundler, source)
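
For example, passing the null node as the only base yields a full bundle (a sketch, assuming an open 'repo'):

    from mercurial.node import nullid
    # everything descends from null, so this bundles the whole repository
    cg = repo.changegroupsubset([nullid], repo.heads(), 'bundle')
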
2018 2000
2019 2001 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2020 2002 """Like getbundle, but taking a discovery.outgoing as an argument.
2021 2003
2022 2004 This is only implemented for local repos and reuses potentially
2023 2005 precomputed sets in outgoing."""
2024 2006 if not outgoing.missing:
2025 2007 return None
2026 2008 bundler = changegroup.bundle10(self, bundlecaps)
2027 2009 return self._changegroupsubset(outgoing, bundler, source)
2028 2010
2029 2011 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2030 2012 """Like changegroupsubset, but returns the set difference between the
2031 2013         ancestors of heads and the ancestors of common.
2032 2014
2033 2015 If heads is None, use the local heads. If common is None, use [nullid].
2034 2016
2035 2017 The nodes in common might not all be known locally due to the way the
2036 2018 current discovery protocol works.
2037 2019 """
2038 2020 cl = self.changelog
2039 2021 if common:
2040 2022 hasnode = cl.hasnode
2041 2023 common = [n for n in common if hasnode(n)]
2042 2024 else:
2043 2025 common = [nullid]
2044 2026 if not heads:
2045 2027 heads = cl.heads()
2046 2028 return self.getlocalbundle(source,
2047 2029 discovery.outgoing(cl, common, heads),
2048 2030 bundlecaps=bundlecaps)
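
The set bundled by getbundle() corresponds to the revset `::heads - ::common`. A sketch of computing the same revisions directly (assuming 'heads' and 'common' are lists of binary nodes):

    # same set as getbundle() transfers, expressed as a revset query
    missing = repo.revs('::%ln - ::%ln', heads, common)
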
2049 2031
2050 2032 @unfilteredmethod
2051 2033 def _changegroupsubset(self, outgoing, bundler, source,
2052 2034 fastpath=False):
2053 2035 commonrevs = outgoing.common
2054 2036 csets = outgoing.missing
2055 2037 heads = outgoing.missingheads
2056 2038         # We go through the fast path if we are told to, or if all (unfiltered)
2057 2039         # heads have been requested (since we then know that all linkrevs will
2058 2040         # be pulled by the client).
2059 2041 heads.sort()
2060 2042 fastpathlinkrev = fastpath or (
2061 2043 self.filtername is None and heads == sorted(self.heads()))
2062 2044
2063 2045 self.hook('preoutgoing', throw=True, source=source)
2064 2046 self.changegroupinfo(csets, source)
2065 2047 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2066 2048 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2067 2049
2068 2050 def changegroup(self, basenodes, source):
2069 2051 # to avoid a race we use changegroupsubset() (issue1320)
2070 2052 return self.changegroupsubset(basenodes, self.heads(), source)
2071 2053
2072 2054 @unfilteredmethod
2073 2055 def addchangegroup(self, source, srctype, url, emptyok=False):
2074 2056 """Add the changegroup returned by source.read() to this repo.
2075 2057 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2076 2058 the URL of the repo where this changegroup is coming from.
2077 2059
2078 2060 Return an integer summarizing the change to this repo:
2079 2061 - nothing changed or no source: 0
2080 2062 - more heads than before: 1+added heads (2..n)
2081 2063 - fewer heads than before: -1-removed heads (-2..-n)
2082 2064 - number of heads stays the same: 1
2083 2065 """
2084 2066 def csmap(x):
2085 2067 self.ui.debug("add changeset %s\n" % short(x))
2086 2068 return len(cl)
2087 2069
2088 2070 def revmap(x):
2089 2071 return cl.rev(x)
2090 2072
2091 2073 if not source:
2092 2074 return 0
2093 2075
2094 2076 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2095 2077
2096 2078 changesets = files = revisions = 0
2097 2079 efiles = set()
2098 2080
2099 2081         # write changelog data to temp files so concurrent readers will not
2100 2082         # see an inconsistent view
2101 2083 cl = self.changelog
2102 2084 cl.delayupdate()
2103 2085 oldheads = cl.heads()
2104 2086
2105 2087 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2106 2088 try:
2107 2089 trp = weakref.proxy(tr)
2108 2090 # pull off the changeset group
2109 2091 self.ui.status(_("adding changesets\n"))
2110 2092 clstart = len(cl)
2111 2093 class prog(object):
2112 2094 step = _('changesets')
2113 2095 count = 1
2114 2096 ui = self.ui
2115 2097 total = None
2116 2098 def __call__(self):
2117 2099 self.ui.progress(self.step, self.count, unit=_('chunks'),
2118 2100 total=self.total)
2119 2101 self.count += 1
2120 2102 pr = prog()
2121 2103 source.callback = pr
2122 2104
2123 2105 source.changelogheader()
2124 2106 srccontent = cl.addgroup(source, csmap, trp)
2125 2107 if not (srccontent or emptyok):
2126 2108 raise util.Abort(_("received changelog group is empty"))
2127 2109 clend = len(cl)
2128 2110 changesets = clend - clstart
2129 2111 for c in xrange(clstart, clend):
2130 2112 efiles.update(self[c].files())
2131 2113 efiles = len(efiles)
2132 2114 self.ui.progress(_('changesets'), None)
2133 2115
2134 2116 # pull off the manifest group
2135 2117 self.ui.status(_("adding manifests\n"))
2136 2118 pr.step = _('manifests')
2137 2119 pr.count = 1
2138 2120 pr.total = changesets # manifests <= changesets
2139 2121 # no need to check for empty manifest group here:
2140 2122 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2141 2123 # no new manifest will be created and the manifest group will
2142 2124 # be empty during the pull
2143 2125 source.manifestheader()
2144 2126 self.manifest.addgroup(source, revmap, trp)
2145 2127 self.ui.progress(_('manifests'), None)
2146 2128
2147 2129 needfiles = {}
2148 2130 if self.ui.configbool('server', 'validate', default=False):
2149 2131 # validate incoming csets have their manifests
2150 2132 for cset in xrange(clstart, clend):
2151 2133 mfest = self.changelog.read(self.changelog.node(cset))[0]
2152 2134 mfest = self.manifest.readdelta(mfest)
2153 2135 # store file nodes we must see
2154 2136 for f, n in mfest.iteritems():
2155 2137 needfiles.setdefault(f, set()).add(n)
2156 2138
2157 2139 # process the files
2158 2140 self.ui.status(_("adding file changes\n"))
2159 2141 pr.step = _('files')
2160 2142 pr.count = 1
2161 2143 pr.total = efiles
2162 2144 source.callback = None
2163 2145
2164 2146 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2165 2147 pr, needfiles)
2166 2148 revisions += newrevs
2167 2149 files += newfiles
2168 2150
2169 2151 dh = 0
2170 2152 if oldheads:
2171 2153 heads = cl.heads()
2172 2154 dh = len(heads) - len(oldheads)
2173 2155 for h in heads:
2174 2156 if h not in oldheads and self[h].closesbranch():
2175 2157 dh -= 1
2176 2158 htext = ""
2177 2159 if dh:
2178 2160 htext = _(" (%+d heads)") % dh
2179 2161
2180 2162 self.ui.status(_("added %d changesets"
2181 2163 " with %d changes to %d files%s\n")
2182 2164 % (changesets, revisions, files, htext))
2183 2165 self.invalidatevolatilesets()
2184 2166
2185 2167 if changesets > 0:
2186 2168 p = lambda: cl.writepending() and self.root or ""
2187 2169 self.hook('pretxnchangegroup', throw=True,
2188 2170 node=hex(cl.node(clstart)), source=srctype,
2189 2171 url=url, pending=p)
2190 2172
2191 2173 added = [cl.node(r) for r in xrange(clstart, clend)]
2192 2174 publishing = self.ui.configbool('phases', 'publish', True)
2193 2175 if srctype == 'push':
2194 2176                 # Old servers cannot push the boundary themselves.
2195 2177                 # New servers won't push the boundary if the changeset
2196 2178                 # already existed locally as secret
2197 2179                 #
2198 2180                 # We should not use 'added' here but the list of all changes
2199 2181                 # in the bundle
2200 2182 if publishing:
2201 2183 phases.advanceboundary(self, phases.public, srccontent)
2202 2184 else:
2203 2185 phases.advanceboundary(self, phases.draft, srccontent)
2204 2186 phases.retractboundary(self, phases.draft, added)
2205 2187 elif srctype != 'strip':
2206 2188 # publishing only alter behavior during push
2207 2189 #
2208 2190 # strip should not touch boundary at all
2209 2191 phases.retractboundary(self, phases.draft, added)
2210 2192
2211 2193 # make changelog see real files again
2212 2194 cl.finalize(trp)
2213 2195
2214 2196 tr.close()
2215 2197
2216 2198 if changesets > 0:
2217 2199 if srctype != 'strip':
2218 2200                     # During strip, the branchcache is invalid but the coming
2219 2201                     # call to `destroyed` will repair it.
2220 2202                     # In the other cases we can safely update the cache on disk.
2221 2203 branchmap.updatecache(self.filtered('served'))
2222 2204 def runhooks():
2223 2205 # These hooks run when the lock releases, not when the
2224 2206 # transaction closes. So it's possible for the changelog
2225 2207 # to have changed since we last saw it.
2226 2208 if clstart >= len(self):
2227 2209 return
2228 2210
2229 2211 # forcefully update the on-disk branch cache
2230 2212 self.ui.debug("updating the branch cache\n")
2231 2213 self.hook("changegroup", node=hex(cl.node(clstart)),
2232 2214 source=srctype, url=url)
2233 2215
2234 2216 for n in added:
2235 2217 self.hook("incoming", node=hex(n), source=srctype,
2236 2218 url=url)
2237 2219
2238 2220 newheads = [h for h in self.heads() if h not in oldheads]
2239 2221 self.ui.log("incoming",
2240 2222 "%s incoming changes - new heads: %s\n",
2241 2223 len(added),
2242 2224 ', '.join([hex(c[:6]) for c in newheads]))
2243 2225 self._afterlock(runhooks)
2244 2226
2245 2227 finally:
2246 2228 tr.release()
2247 2229 # never return 0 here:
2248 2230 if dh < 0:
2249 2231 return dh - 1
2250 2232 else:
2251 2233 return dh + 1
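
The final return statement folds the head-count delta `dh` into a non-zero code. A hypothetical helper to invert that encoding (not part of the original file):

    def headcount_delta(ret):
        # inverse of the encoding above: ret = dh - 1 if dh < 0 else dh + 1
        assert ret != 0, 'addchangegroup() never returns 0 on success'
        return ret + 1 if ret < 0 else ret - 1
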
2252 2234
2253 2235 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2254 2236 revisions = 0
2255 2237 files = 0
2256 2238 while True:
2257 2239 chunkdata = source.filelogheader()
2258 2240 if not chunkdata:
2259 2241 break
2260 2242 f = chunkdata["filename"]
2261 2243 self.ui.debug("adding %s revisions\n" % f)
2262 2244 pr()
2263 2245 fl = self.file(f)
2264 2246 o = len(fl)
2265 2247 if not fl.addgroup(source, revmap, trp):
2266 2248 raise util.Abort(_("received file revlog group is empty"))
2267 2249 revisions += len(fl) - o
2268 2250 files += 1
2269 2251 if f in needfiles:
2270 2252 needs = needfiles[f]
2271 2253 for new in xrange(o, len(fl)):
2272 2254 n = fl.node(new)
2273 2255 if n in needs:
2274 2256 needs.remove(n)
2275 2257 else:
2276 2258 raise util.Abort(
2277 2259 _("received spurious file revlog entry"))
2278 2260 if not needs:
2279 2261 del needfiles[f]
2280 2262 self.ui.progress(_('files'), None)
2281 2263
2282 2264 for f, needs in needfiles.iteritems():
2283 2265 fl = self.file(f)
2284 2266 for n in needs:
2285 2267 try:
2286 2268 fl.rev(n)
2287 2269 except error.LookupError:
2288 2270 raise util.Abort(
2289 2271 _('missing file data for %s:%s - run hg verify') %
2290 2272 (f, hex(n)))
2291 2273
2292 2274 return revisions, files
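
The needfiles bookkeeping above ensures every file node referenced by an incoming manifest actually arrives in some filelog group. A condensed, standalone sketch of that cross-check (hypothetical data shapes, illustrative only):

    def check_received(needfiles, received):
        # needfiles: {filename: set of expected nodes}
        # received:  {filename: set of nodes that actually arrived}
        for f, needs in needfiles.iteritems():
            missing = needs - received.get(f, set())
            if missing:
                raise ValueError('missing file data for %s' % f)
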
2293 2275
2294 2276 def stream_in(self, remote, requirements):
2295 2277 lock = self.lock()
2296 2278 try:
2297 2279 # Save remote branchmap. We will use it later
2298 2280 # to speed up branchcache creation
2299 2281 rbranchmap = None
2300 2282 if remote.capable("branchmap"):
2301 2283 rbranchmap = remote.branchmap()
2302 2284
2303 2285 fp = remote.stream_out()
2304 2286 l = fp.readline()
2305 2287 try:
2306 2288 resp = int(l)
2307 2289 except ValueError:
2308 2290 raise error.ResponseError(
2309 2291 _('unexpected response from remote server:'), l)
2310 2292 if resp == 1:
2311 2293 raise util.Abort(_('operation forbidden by server'))
2312 2294 elif resp == 2:
2313 2295 raise util.Abort(_('locking the remote repository failed'))
2314 2296 elif resp != 0:
2315 2297 raise util.Abort(_('the server sent an unknown error code'))
2316 2298 self.ui.status(_('streaming all changes\n'))
2317 2299 l = fp.readline()
2318 2300 try:
2319 2301 total_files, total_bytes = map(int, l.split(' ', 1))
2320 2302 except (ValueError, TypeError):
2321 2303 raise error.ResponseError(
2322 2304 _('unexpected response from remote server:'), l)
2323 2305 self.ui.status(_('%d files to transfer, %s of data\n') %
2324 2306 (total_files, util.bytecount(total_bytes)))
2325 2307 handled_bytes = 0
2326 2308 self.ui.progress(_('clone'), 0, total=total_bytes)
2327 2309 start = time.time()
2328 2310 for i in xrange(total_files):
2329 2311 # XXX doesn't support '\n' or '\r' in filenames
2330 2312 l = fp.readline()
2331 2313 try:
2332 2314 name, size = l.split('\0', 1)
2333 2315 size = int(size)
2334 2316 except (ValueError, TypeError):
2335 2317 raise error.ResponseError(
2336 2318 _('unexpected response from remote server:'), l)
2337 2319 if self.ui.debugflag:
2338 2320 self.ui.debug('adding %s (%s)\n' %
2339 2321 (name, util.bytecount(size)))
2340 2322 # for backwards compat, name was partially encoded
2341 2323 ofp = self.sopener(store.decodedir(name), 'w')
2342 2324 for chunk in util.filechunkiter(fp, limit=size):
2343 2325 handled_bytes += len(chunk)
2344 2326 self.ui.progress(_('clone'), handled_bytes,
2345 2327 total=total_bytes)
2346 2328 ofp.write(chunk)
2347 2329 ofp.close()
2348 2330 elapsed = time.time() - start
2349 2331 if elapsed <= 0:
2350 2332 elapsed = 0.001
2351 2333 self.ui.progress(_('clone'), None)
2352 2334 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2353 2335 (util.bytecount(total_bytes), elapsed,
2354 2336 util.bytecount(total_bytes / elapsed)))
2355 2337
2356 2338             # new requirements = old non-format requirements +
2357 2339             #                    new format-related requirements
2358 2340             #                    from the streamed-in repository
2359 2341 requirements.update(set(self.requirements) - self.supportedformats)
2360 2342 self._applyrequirements(requirements)
2361 2343 self._writerequirements()
2362 2344
2363 2345 if rbranchmap:
2364 2346 rbheads = []
2365 2347 for bheads in rbranchmap.itervalues():
2366 2348 rbheads.extend(bheads)
2367 2349
2368 2350 if rbheads:
2369 2351 rtiprev = max((int(self.changelog.rev(node))
2370 2352 for node in rbheads))
2371 2353 cache = branchmap.branchcache(rbranchmap,
2372 2354 self[rtiprev].node(),
2373 2355 rtiprev)
2374 2356                 # Try to stick it as low as possible;
2375 2357                 # filters above 'served' are unlikely to be fetched from a clone
2376 2358 for candidate in ('base', 'immutable', 'served'):
2377 2359 rview = self.filtered(candidate)
2378 2360 if cache.validfor(rview):
2379 2361 self._branchcaches[candidate] = cache
2380 2362 cache.write(rview)
2381 2363 break
2382 2364 self.invalidate()
2383 2365 return len(self.heads()) + 1
2384 2366 finally:
2385 2367 lock.release()
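
The stream-clone response parsed above is line oriented: a status code line (0 ok, 1 forbidden, 2 remote lock failed), then a 'total_files total_bytes' line, then for each file a 'name\0size' header followed by exactly size raw bytes. A minimal header-reader sketch (illustrative, not the real implementation):

    def read_stream_header(fp):
        # consume the two framing lines sent by the server's stream_out
        resp = int(fp.readline())
        if resp != 0:
            raise IOError('server refused stream clone (code %d)' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes
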
2386 2368
2387 2369 def clone(self, remote, heads=[], stream=False):
2388 2370 '''clone remote repository.
2389 2371
2390 2372 keyword arguments:
2391 2373 heads: list of revs to clone (forces use of pull)
2392 2374 stream: use streaming clone if possible'''
2393 2375
2394 2376 # now, all clients that can request uncompressed clones can
2395 2377 # read repo formats supported by all servers that can serve
2396 2378 # them.
2397 2379
2398 2380 # if revlog format changes, client will have to check version
2399 2381 # and format flags on "stream" capability, and use
2400 2382 # uncompressed only if compatible.
2401 2383
2402 2384 if not stream:
2403 2385 # if the server explicitly prefers to stream (for fast LANs)
2404 2386 stream = remote.capable('stream-preferred')
2405 2387
2406 2388 if stream and not heads:
2407 2389 # 'stream' means remote revlog format is revlogv1 only
2408 2390 if remote.capable('stream'):
2409 2391 return self.stream_in(remote, set(('revlogv1',)))
2410 2392 # otherwise, 'streamreqs' contains the remote revlog format
2411 2393 streamreqs = remote.capable('streamreqs')
2412 2394 if streamreqs:
2413 2395 streamreqs = set(streamreqs.split(','))
2414 2396 # if we support it, stream in and adjust our requirements
2415 2397 if not streamreqs - self.supportedformats:
2416 2398 return self.stream_in(remote, streamreqs)
2417 2399 return self.pull(remote, heads)
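
So the decision order in clone() is: honor a stream request (explicit or server-preferred) only when no specific heads were asked for, try the plain 'stream' capability, then 'streamreqs' if every advertised requirement is supported, and otherwise fall back to pull(). A usage sketch (placeholder URL):

    from mercurial import ui as uimod, hg
    myui = uimod.ui()
    dest = hg.repository(myui, 'copy', create=True)
    src = hg.peer(myui, {}, 'http://example.com/repo')
    dest.clone(src, stream=True)   # falls back to pull() when unsupported
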
2418 2400
2419 2401 def pushkey(self, namespace, key, old, new):
2420 2402 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2421 2403 old=old, new=new)
2422 2404 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2423 2405 ret = pushkey.push(self, namespace, key, old, new)
2424 2406 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2425 2407 ret=ret)
2426 2408 return ret
2427 2409
2428 2410 def listkeys(self, namespace):
2429 2411 self.hook('prelistkeys', throw=True, namespace=namespace)
2430 2412 self.ui.debug('listing keys for "%s"\n' % namespace)
2431 2413 values = pushkey.list(self, namespace)
2432 2414 self.hook('listkeys', namespace=namespace, values=values)
2433 2415 return values
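
A sketch of a pushkey/listkeys round trip on the bookmarks namespace (illustrative; with this namespace an empty string as the old value creates a new bookmark):

    from mercurial.node import hex
    repo.pushkey('bookmarks', 'feature', '', hex(repo['tip'].node()))
    print repo.listkeys('bookmarks')
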
2434 2416
2435 2417 def debugwireargs(self, one, two, three=None, four=None, five=None):
2436 2418 '''used to test argument passing over the wire'''
2437 2419 return "%s %s %s %s %s" % (one, two, three, four, five)
2438 2420
2439 2421 def savecommitmessage(self, text):
2440 2422 fp = self.opener('last-message.txt', 'wb')
2441 2423 try:
2442 2424 fp.write(text)
2443 2425 finally:
2444 2426 fp.close()
2445 2427 return self.pathto(fp.name[len(self.root) + 1:])
2446 2428
2447 2429 # used to avoid circular references so destructors work
2448 2430 def aftertrans(files):
2449 2431 renamefiles = [tuple(t) for t in files]
2450 2432 def a():
2451 2433 for vfs, src, dest in renamefiles:
2452 2434 try:
2453 2435 vfs.rename(src, dest)
2454 2436 except OSError: # journal file does not yet exist
2455 2437 pass
2456 2438 return a
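
aftertrans() is handed to the transaction machinery as a post-close callback: once the transaction finishes, journal files are renamed to their undo counterparts so a later rollback can find them. A usage sketch (the vfs target directory is hypothetical):

    from mercurial import scmutil
    vfs = scmutil.vfs('.hg/store')            # hypothetical target directory
    after = aftertrans([(vfs, 'journal', 'undo')])
    after()   # renames journal -> undo, ignoring a missing journal file
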
2457 2439
2458 2440 def undoname(fn):
2459 2441 base, name = os.path.split(fn)
2460 2442 assert name.startswith('journal')
2461 2443 return os.path.join(base, name.replace('journal', 'undo', 1))
2462 2444
2463 2445 def instance(ui, path, create):
2464 2446 return localrepository(ui, util.urllocalpath(path), create)
2465 2447
2466 2448 def islocal(path):
2467 2449 return True