localrepo: make report level in repo.transaction configurable...
David Soria Parra
r19853:eddc2a2d default
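
This changeset adds an optional 'report' argument to localrepository.transaction(): the transaction's messages were previously hard-wired to ui.warn, and callers can now substitute their own reporting function. The sketch below is editorial, not part of the diff; it assumes an existing 'repo' (a localrepository instance) and a hypothetical quietreport() callback, and follows the lock/transaction/close/release discipline visible in commitctx() further down.

    def quietreport(msg):
        pass  # swallow transaction messages instead of writing them to stderr

    lock = repo.lock()  # opening a transaction requires the store lock
    try:
        tr = repo.transaction('example', report=quietreport)
        try:
            # ... modify the store through tr ...
            tr.close()  # commit the transaction
        finally:
            tr.release()  # rolls the transaction back if close() never ran
    finally:
        lock.release()
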
@@ -1,2470 +1,2470 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 unfi = repo.unfiltered()
43 43 if unfi is repo:
44 44 return super(unfilteredpropertycache, self).__get__(unfi)
45 45 return getattr(unfi, self.name)
46 46
47 47 class filteredpropertycache(propertycache):
48 48 """propertycache that must take filtering in account"""
49 49
50 50 def cachevalue(self, obj, value):
51 51 object.__setattr__(obj, self.name, value)
52 52
53 53
54 54 def hasunfilteredcache(repo, name):
55 55 """check if a repo has an unfilteredpropertycache value for <name>"""
56 56 return name in vars(repo.unfiltered())
57 57
58 58 def unfilteredmethod(orig):
59 59 """decorate method that always need to be run on unfiltered version"""
60 60 def wrapper(repo, *args, **kwargs):
61 61 return orig(repo.unfiltered(), *args, **kwargs)
62 62 return wrapper
63 63
64 64 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
65 65 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
66 66
67 67 class localpeer(peer.peerrepository):
68 68 '''peer for a local repo; reflects only the most recent API'''
69 69
70 70 def __init__(self, repo, caps=MODERNCAPS):
71 71 peer.peerrepository.__init__(self)
72 72 self._repo = repo.filtered('served')
73 73 self.ui = repo.ui
74 74 self._caps = repo._restrictcapabilities(caps)
75 75 self.requirements = repo.requirements
76 76 self.supportedformats = repo.supportedformats
77 77
78 78 def close(self):
79 79 self._repo.close()
80 80
81 81 def _capabilities(self):
82 82 return self._caps
83 83
84 84 def local(self):
85 85 return self._repo
86 86
87 87 def canpush(self):
88 88 return True
89 89
90 90 def url(self):
91 91 return self._repo.url()
92 92
93 93 def lookup(self, key):
94 94 return self._repo.lookup(key)
95 95
96 96 def branchmap(self):
97 97 return self._repo.branchmap()
98 98
99 99 def heads(self):
100 100 return self._repo.heads()
101 101
102 102 def known(self, nodes):
103 103 return self._repo.known(nodes)
104 104
105 105 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
106 106 return self._repo.getbundle(source, heads=heads, common=common,
107 107 bundlecaps=bundlecaps)
108 108
109 109 # TODO We might want to move the next two calls into legacypeer and add
110 110 # unbundle instead.
111 111
112 112 def lock(self):
113 113 return self._repo.lock()
114 114
115 115 def addchangegroup(self, cg, source, url):
116 116 return self._repo.addchangegroup(cg, source, url)
117 117
118 118 def pushkey(self, namespace, key, old, new):
119 119 return self._repo.pushkey(namespace, key, old, new)
120 120
121 121 def listkeys(self, namespace):
122 122 return self._repo.listkeys(namespace)
123 123
124 124 def debugwireargs(self, one, two, three=None, four=None, five=None):
125 125 '''used to test argument passing over the wire'''
126 126 return "%s %s %s %s %s" % (one, two, three, four, five)
127 127
128 128 class locallegacypeer(localpeer):
129 129 '''peer extension which implements legacy methods too; used for tests with
130 130 restricted capabilities'''
131 131
132 132 def __init__(self, repo):
133 133 localpeer.__init__(self, repo, caps=LEGACYCAPS)
134 134
135 135 def branches(self, nodes):
136 136 return self._repo.branches(nodes)
137 137
138 138 def between(self, pairs):
139 139 return self._repo.between(pairs)
140 140
141 141 def changegroup(self, basenodes, source):
142 142 return self._repo.changegroup(basenodes, source)
143 143
144 144 def changegroupsubset(self, bases, heads, source):
145 145 return self._repo.changegroupsubset(bases, heads, source)
146 146
147 147 class localrepository(object):
148 148
149 149 supportedformats = set(('revlogv1', 'generaldelta'))
150 150 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
151 151 'dotencode'))
152 152 openerreqs = set(('revlogv1', 'generaldelta'))
153 153 requirements = ['revlogv1']
154 154 filtername = None
155 155
156 156 featuresetupfuncs = set()
157 157
158 158 def _baserequirements(self, create):
159 159 return self.requirements[:]
160 160
161 161 def __init__(self, baseui, path=None, create=False):
162 162 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
163 163 self.wopener = self.wvfs
164 164 self.root = self.wvfs.base
165 165 self.path = self.wvfs.join(".hg")
166 166 self.origroot = path
167 167 self.auditor = scmutil.pathauditor(self.root, self._checknested)
168 168 self.vfs = scmutil.vfs(self.path)
169 169 self.opener = self.vfs
170 170 self.baseui = baseui
171 171 self.ui = baseui.copy()
172 172 # A list of callbacks to shape the phase if no data were found.
173 173 # Callbacks are in the form: func(repo, roots) --> processed root.
174 174 # This list is to be filled by extensions during repo setup.
175 175 self._phasedefaults = []
176 176 try:
177 177 self.ui.readconfig(self.join("hgrc"), self.root)
178 178 extensions.loadall(self.ui)
179 179 except IOError:
180 180 pass
181 181
182 182 if self.featuresetupfuncs:
183 183 self.supported = set(self._basesupported) # use private copy
184 184 for setupfunc in self.featuresetupfuncs:
185 185 setupfunc(self.ui, self.supported)
186 186 else:
187 187 self.supported = self._basesupported
188 188
189 189 if not self.vfs.isdir():
190 190 if create:
191 191 if not self.wvfs.exists():
192 192 self.wvfs.makedirs()
193 193 self.vfs.makedir(notindexed=True)
194 194 requirements = self._baserequirements(create)
195 195 if self.ui.configbool('format', 'usestore', True):
196 196 self.vfs.mkdir("store")
197 197 requirements.append("store")
198 198 if self.ui.configbool('format', 'usefncache', True):
199 199 requirements.append("fncache")
200 200 if self.ui.configbool('format', 'dotencode', True):
201 201 requirements.append('dotencode')
202 202 # create an invalid changelog
203 203 self.vfs.append(
204 204 "00changelog.i",
205 205 '\0\0\0\2' # represents revlogv2
206 206 ' dummy changelog to prevent using the old repo layout'
207 207 )
208 208 if self.ui.configbool('format', 'generaldelta', False):
209 209 requirements.append("generaldelta")
210 210 requirements = set(requirements)
211 211 else:
212 212 raise error.RepoError(_("repository %s not found") % path)
213 213 elif create:
214 214 raise error.RepoError(_("repository %s already exists") % path)
215 215 else:
216 216 try:
217 217 requirements = scmutil.readrequires(self.vfs, self.supported)
218 218 except IOError, inst:
219 219 if inst.errno != errno.ENOENT:
220 220 raise
221 221 requirements = set()
222 222
223 223 self.sharedpath = self.path
224 224 try:
225 225 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
226 226 realpath=True)
227 227 s = vfs.base
228 228 if not vfs.exists():
229 229 raise error.RepoError(
230 230 _('.hg/sharedpath points to nonexistent directory %s') % s)
231 231 self.sharedpath = s
232 232 except IOError, inst:
233 233 if inst.errno != errno.ENOENT:
234 234 raise
235 235
236 236 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
237 237 self.spath = self.store.path
238 238 self.svfs = self.store.vfs
239 239 self.sopener = self.svfs
240 240 self.sjoin = self.store.join
241 241 self.vfs.createmode = self.store.createmode
242 242 self._applyrequirements(requirements)
243 243 if create:
244 244 self._writerequirements()
245 245
246 246
247 247 self._branchcaches = {}
248 248 self.filterpats = {}
249 249 self._datafilters = {}
250 250 self._transref = self._lockref = self._wlockref = None
251 251
252 252 # A cache for various files under .hg/ that tracks file changes,
253 253 # (used by the filecache decorator)
254 254 #
255 255 # Maps a property name to its util.filecacheentry
256 256 self._filecache = {}
257 257
258 258 # holds sets of revisions to be filtered
259 259 # should be cleared when something might have changed the filter value:
260 260 # - new changesets,
261 261 # - phase change,
262 262 # - new obsolescence marker,
263 263 # - working directory parent change,
264 264 # - bookmark changes
265 265 self.filteredrevcache = {}
266 266
267 267 def close(self):
268 268 pass
269 269
270 270 def _restrictcapabilities(self, caps):
271 271 return caps
272 272
273 273 def _applyrequirements(self, requirements):
274 274 self.requirements = requirements
275 275 self.sopener.options = dict((r, 1) for r in requirements
276 276 if r in self.openerreqs)
277 277
278 278 def _writerequirements(self):
279 279 reqfile = self.opener("requires", "w")
280 280 for r in sorted(self.requirements):
281 281 reqfile.write("%s\n" % r)
282 282 reqfile.close()
283 283
284 284 def _checknested(self, path):
285 285 """Determine if path is a legal nested repository."""
286 286 if not path.startswith(self.root):
287 287 return False
288 288 subpath = path[len(self.root) + 1:]
289 289 normsubpath = util.pconvert(subpath)
290 290
291 291 # XXX: Checking against the current working copy is wrong in
292 292 # the sense that it can reject things like
293 293 #
294 294 # $ hg cat -r 10 sub/x.txt
295 295 #
296 296 # if sub/ is no longer a subrepository in the working copy
297 297 # parent revision.
298 298 #
299 299 # However, it can of course also allow things that would have
300 300 # been rejected before, such as the above cat command if sub/
301 301 # is a subrepository now, but was a normal directory before.
302 302 # The old path auditor would have rejected by mistake since it
303 303 # panics when it sees sub/.hg/.
304 304 #
305 305 # All in all, checking against the working copy seems sensible
306 306 # since we want to prevent access to nested repositories on
307 307 # the filesystem *now*.
308 308 ctx = self[None]
309 309 parts = util.splitpath(subpath)
310 310 while parts:
311 311 prefix = '/'.join(parts)
312 312 if prefix in ctx.substate:
313 313 if prefix == normsubpath:
314 314 return True
315 315 else:
316 316 sub = ctx.sub(prefix)
317 317 return sub.checknested(subpath[len(prefix) + 1:])
318 318 else:
319 319 parts.pop()
320 320 return False
321 321
322 322 def peer(self):
323 323 return localpeer(self) # not cached to avoid reference cycle
324 324
325 325 def unfiltered(self):
326 326 """Return unfiltered version of the repository
327 327
328 328 Intended to be overwritten by filtered repo."""
329 329 return self
330 330
331 331 def filtered(self, name):
332 332 """Return a filtered version of a repository"""
333 333 # build a new class with the mixin and the current class
334 334 # (possibly subclass of the repo)
335 335 class proxycls(repoview.repoview, self.unfiltered().__class__):
336 336 pass
337 337 return proxycls(self, name)
338 338
339 339 @repofilecache('bookmarks')
340 340 def _bookmarks(self):
341 341 return bookmarks.bmstore(self)
342 342
343 343 @repofilecache('bookmarks.current')
344 344 def _bookmarkcurrent(self):
345 345 return bookmarks.readcurrent(self)
346 346
347 347 def bookmarkheads(self, bookmark):
348 348 name = bookmark.split('@', 1)[0]
349 349 heads = []
350 350 for mark, n in self._bookmarks.iteritems():
351 351 if mark.split('@', 1)[0] == name:
352 352 heads.append(n)
353 353 return heads
354 354
355 355 @storecache('phaseroots')
356 356 def _phasecache(self):
357 357 return phases.phasecache(self, self._phasedefaults)
358 358
359 359 @storecache('obsstore')
360 360 def obsstore(self):
361 361 store = obsolete.obsstore(self.sopener)
362 362 if store and not obsolete._enabled:
363 363 # message is rare enough to not be translated
364 364 msg = 'obsolete feature not enabled but %i markers found!\n'
365 365 self.ui.warn(msg % len(list(store)))
366 366 return store
367 367
368 368 @storecache('00changelog.i')
369 369 def changelog(self):
370 370 c = changelog.changelog(self.sopener)
371 371 if 'HG_PENDING' in os.environ:
372 372 p = os.environ['HG_PENDING']
373 373 if p.startswith(self.root):
374 374 c.readpending('00changelog.i.a')
375 375 return c
376 376
377 377 @storecache('00manifest.i')
378 378 def manifest(self):
379 379 return manifest.manifest(self.sopener)
380 380
381 381 @repofilecache('dirstate')
382 382 def dirstate(self):
383 383 warned = [0]
384 384 def validate(node):
385 385 try:
386 386 self.changelog.rev(node)
387 387 return node
388 388 except error.LookupError:
389 389 if not warned[0]:
390 390 warned[0] = True
391 391 self.ui.warn(_("warning: ignoring unknown"
392 392 " working parent %s!\n") % short(node))
393 393 return nullid
394 394
395 395 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
396 396
397 397 def __getitem__(self, changeid):
398 398 if changeid is None:
399 399 return context.workingctx(self)
400 400 return context.changectx(self, changeid)
401 401
402 402 def __contains__(self, changeid):
403 403 try:
404 404 return bool(self.lookup(changeid))
405 405 except error.RepoLookupError:
406 406 return False
407 407
408 408 def __nonzero__(self):
409 409 return True
410 410
411 411 def __len__(self):
412 412 return len(self.changelog)
413 413
414 414 def __iter__(self):
415 415 return iter(self.changelog)
416 416
417 417 def revs(self, expr, *args):
418 418 '''Return a list of revisions matching the given revset'''
419 419 expr = revset.formatspec(expr, *args)
420 420 m = revset.match(None, expr)
421 421 return [r for r in m(self, list(self))]
422 422
423 423 def set(self, expr, *args):
424 424 '''
425 425 Yield a context for each matching revision, after doing arg
426 426 replacement via revset.formatspec
427 427 '''
428 428 for r in self.revs(expr, *args):
429 429 yield self[r]
430 430
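# Editorial usage sketch, not part of this changeset: revs() and set()
# accept a revset template whose placeholders are expanded by
# revset.formatspec. Assuming an existing 'repo' object:
#
#   for ctx in repo.set('branch(%s) and head()', 'default'):
#       repo.ui.status('%s\n' % ctx)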
431 431 def url(self):
432 432 return 'file:' + self.root
433 433
434 434 def hook(self, name, throw=False, **args):
435 435 return hook.hook(self.ui, self, name, throw, **args)
436 436
437 437 @unfilteredmethod
438 438 def _tag(self, names, node, message, local, user, date, extra={}):
439 439 if isinstance(names, str):
440 440 names = (names,)
441 441
442 442 branches = self.branchmap()
443 443 for name in names:
444 444 self.hook('pretag', throw=True, node=hex(node), tag=name,
445 445 local=local)
446 446 if name in branches:
447 447 self.ui.warn(_("warning: tag %s conflicts with existing"
448 448 " branch name\n") % name)
449 449
450 450 def writetags(fp, names, munge, prevtags):
451 451 fp.seek(0, 2)
452 452 if prevtags and prevtags[-1] != '\n':
453 453 fp.write('\n')
454 454 for name in names:
455 455 m = munge and munge(name) or name
456 456 if (self._tagscache.tagtypes and
457 457 name in self._tagscache.tagtypes):
458 458 old = self.tags().get(name, nullid)
459 459 fp.write('%s %s\n' % (hex(old), m))
460 460 fp.write('%s %s\n' % (hex(node), m))
461 461 fp.close()
462 462
463 463 prevtags = ''
464 464 if local:
465 465 try:
466 466 fp = self.opener('localtags', 'r+')
467 467 except IOError:
468 468 fp = self.opener('localtags', 'a')
469 469 else:
470 470 prevtags = fp.read()
471 471
472 472 # local tags are stored in the current charset
473 473 writetags(fp, names, None, prevtags)
474 474 for name in names:
475 475 self.hook('tag', node=hex(node), tag=name, local=local)
476 476 return
477 477
478 478 try:
479 479 fp = self.wfile('.hgtags', 'rb+')
480 480 except IOError, e:
481 481 if e.errno != errno.ENOENT:
482 482 raise
483 483 fp = self.wfile('.hgtags', 'ab')
484 484 else:
485 485 prevtags = fp.read()
486 486
487 487 # committed tags are stored in UTF-8
488 488 writetags(fp, names, encoding.fromlocal, prevtags)
489 489
490 490 fp.close()
491 491
492 492 self.invalidatecaches()
493 493
494 494 if '.hgtags' not in self.dirstate:
495 495 self[None].add(['.hgtags'])
496 496
497 497 m = matchmod.exact(self.root, '', ['.hgtags'])
498 498 tagnode = self.commit(message, user, date, extra=extra, match=m)
499 499
500 500 for name in names:
501 501 self.hook('tag', node=hex(node), tag=name, local=local)
502 502
503 503 return tagnode
504 504
505 505 def tag(self, names, node, message, local, user, date):
506 506 '''tag a revision with one or more symbolic names.
507 507
508 508 names is a list of strings or, when adding a single tag, names may be a
509 509 string.
510 510
511 511 if local is True, the tags are stored in a per-repository file.
512 512 otherwise, they are stored in the .hgtags file, and a new
513 513 changeset is committed with the change.
514 514
515 515 keyword arguments:
516 516
517 517 local: whether to store tags in non-version-controlled file
518 518 (default False)
519 519
520 520 message: commit message to use if committing
521 521
522 522 user: name of user to use if committing
523 523
524 524 date: date tuple to use if committing'''
525 525
526 526 if not local:
527 527 for x in self.status()[:5]:
528 528 if '.hgtags' in x:
529 529 raise util.Abort(_('working copy of .hgtags is changed '
530 530 '(please commit .hgtags manually)'))
531 531
532 532 self.tags() # instantiate the cache
533 533 self._tag(names, node, message, local, user, date)
534 534
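# Editorial sketch of the tag() API documented above (the tag name,
# message, and node are illustrative; user=None and date=None are
# assumed to fall through to the usual commit defaults):
#
#   repo.tag(['v1.0'], repo['tip'].node(), 'Added tag v1.0',
#            local=False, user=None, date=None)
#
# With local=True the tag would go to .hg/localtags instead of a
# committed .hgtags change.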
535 535 @filteredpropertycache
536 536 def _tagscache(self):
537 537 '''Returns a tagscache object that contains various tag-related
538 538 caches.'''
539 539
540 540 # This simplifies its cache management by having one decorated
541 541 # function (this one) and the rest simply fetch things from it.
542 542 class tagscache(object):
543 543 def __init__(self):
544 544 # These two define the set of tags for this repository. tags
545 545 # maps tag name to node; tagtypes maps tag name to 'global' or
546 546 # 'local'. (Global tags are defined by .hgtags across all
547 547 # heads, and local tags are defined in .hg/localtags.)
548 548 # They constitute the in-memory cache of tags.
549 549 self.tags = self.tagtypes = None
550 550
551 551 self.nodetagscache = self.tagslist = None
552 552
553 553 cache = tagscache()
554 554 cache.tags, cache.tagtypes = self._findtags()
555 555
556 556 return cache
557 557
558 558 def tags(self):
559 559 '''return a mapping of tag to node'''
560 560 t = {}
561 561 if self.changelog.filteredrevs:
562 562 tags, tt = self._findtags()
563 563 else:
564 564 tags = self._tagscache.tags
565 565 for k, v in tags.iteritems():
566 566 try:
567 567 # ignore tags to unknown nodes
568 568 self.changelog.rev(v)
569 569 t[k] = v
570 570 except (error.LookupError, ValueError):
571 571 pass
572 572 return t
573 573
574 574 def _findtags(self):
575 575 '''Do the hard work of finding tags. Return a pair of dicts
576 576 (tags, tagtypes) where tags maps tag name to node, and tagtypes
577 577 maps tag name to a string like \'global\' or \'local\'.
578 578 Subclasses or extensions are free to add their own tags, but
579 579 should be aware that the returned dicts will be retained for the
580 580 duration of the localrepo object.'''
581 581
582 582 # XXX what tagtype should subclasses/extensions use? Currently
583 583 # mq and bookmarks add tags, but do not set the tagtype at all.
584 584 # Should each extension invent its own tag type? Should there
585 585 # be one tagtype for all such "virtual" tags? Or is the status
586 586 # quo fine?
587 587
588 588 alltags = {} # map tag name to (node, hist)
589 589 tagtypes = {}
590 590
591 591 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
592 592 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
593 593
594 594 # Build the return dicts. Have to re-encode tag names because
595 595 # the tags module always uses UTF-8 (in order not to lose info
596 596 # writing to the cache), but the rest of Mercurial wants them in
597 597 # local encoding.
598 598 tags = {}
599 599 for (name, (node, hist)) in alltags.iteritems():
600 600 if node != nullid:
601 601 tags[encoding.tolocal(name)] = node
602 602 tags['tip'] = self.changelog.tip()
603 603 tagtypes = dict([(encoding.tolocal(name), value)
604 604 for (name, value) in tagtypes.iteritems()])
605 605 return (tags, tagtypes)
606 606
607 607 def tagtype(self, tagname):
608 608 '''
609 609 return the type of the given tag. result can be:
610 610
611 611 'local' : a local tag
612 612 'global' : a global tag
613 613 None : tag does not exist
614 614 '''
615 615
616 616 return self._tagscache.tagtypes.get(tagname)
617 617
618 618 def tagslist(self):
619 619 '''return a list of tags ordered by revision'''
620 620 if not self._tagscache.tagslist:
621 621 l = []
622 622 for t, n in self.tags().iteritems():
623 623 r = self.changelog.rev(n)
624 624 l.append((r, t, n))
625 625 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
626 626
627 627 return self._tagscache.tagslist
628 628
629 629 def nodetags(self, node):
630 630 '''return the tags associated with a node'''
631 631 if not self._tagscache.nodetagscache:
632 632 nodetagscache = {}
633 633 for t, n in self._tagscache.tags.iteritems():
634 634 nodetagscache.setdefault(n, []).append(t)
635 635 for tags in nodetagscache.itervalues():
636 636 tags.sort()
637 637 self._tagscache.nodetagscache = nodetagscache
638 638 return self._tagscache.nodetagscache.get(node, [])
639 639
640 640 def nodebookmarks(self, node):
641 641 marks = []
642 642 for bookmark, n in self._bookmarks.iteritems():
643 643 if n == node:
644 644 marks.append(bookmark)
645 645 return sorted(marks)
646 646
647 647 def branchmap(self):
648 648 '''returns a dictionary {branch: [branchheads]}'''
649 649 branchmap.updatecache(self)
650 650 return self._branchcaches[self.filtername]
651 651
652 652
653 653 def _branchtip(self, heads):
654 654 '''return the tipmost branch head in heads'''
655 655 tip = heads[-1]
656 656 for h in reversed(heads):
657 657 if not self[h].closesbranch():
658 658 tip = h
659 659 break
660 660 return tip
661 661
662 662 def branchtip(self, branch):
663 663 '''return the tip node for a given branch'''
664 664 if branch not in self.branchmap():
665 665 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
666 666 return self._branchtip(self.branchmap()[branch])
667 667
668 668 def branchtags(self):
669 669 '''return a dict where branch names map to the tipmost head of
670 670 the branch; open heads come before closed'''
671 671 bt = {}
672 672 for bn, heads in self.branchmap().iteritems():
673 673 bt[bn] = self._branchtip(heads)
674 674 return bt
675 675
676 676 def lookup(self, key):
677 677 return self[key].node()
678 678
679 679 def lookupbranch(self, key, remote=None):
680 680 repo = remote or self
681 681 if key in repo.branchmap():
682 682 return key
683 683
684 684 repo = (remote and remote.local()) and remote or self
685 685 return repo[key].branch()
686 686
687 687 def known(self, nodes):
688 688 nm = self.changelog.nodemap
689 689 pc = self._phasecache
690 690 result = []
691 691 for n in nodes:
692 692 r = nm.get(n)
693 693 resp = not (r is None or pc.phase(self, r) >= phases.secret)
694 694 result.append(resp)
695 695 return result
696 696
697 697 def local(self):
698 698 return self
699 699
700 700 def cancopy(self):
701 701 return self.local() # so statichttprepo's override of local() works
702 702
703 703 def join(self, f):
704 704 return os.path.join(self.path, f)
705 705
706 706 def wjoin(self, f):
707 707 return os.path.join(self.root, f)
708 708
709 709 def file(self, f):
710 710 if f[0] == '/':
711 711 f = f[1:]
712 712 return filelog.filelog(self.sopener, f)
713 713
714 714 def changectx(self, changeid):
715 715 return self[changeid]
716 716
717 717 def parents(self, changeid=None):
718 718 '''get list of changectxs for parents of changeid'''
719 719 return self[changeid].parents()
720 720
721 721 def setparents(self, p1, p2=nullid):
722 722 copies = self.dirstate.setparents(p1, p2)
723 723 pctx = self[p1]
724 724 if copies:
725 725 # Adjust copy records; the dirstate cannot do it, as it
726 726 # requires access to the parents' manifests. Preserve them
727 727 # only for entries added to the first parent.
728 728 for f in copies:
729 729 if f not in pctx and copies[f] in pctx:
730 730 self.dirstate.copy(copies[f], f)
731 731 if p2 == nullid:
732 732 for f, s in sorted(self.dirstate.copies().items()):
733 733 if f not in pctx and s not in pctx:
734 734 self.dirstate.copy(None, f)
735 735
736 736 def filectx(self, path, changeid=None, fileid=None):
737 737 """changeid can be a changeset revision, node, or tag.
738 738 fileid can be a file revision or node."""
739 739 return context.filectx(self, path, changeid, fileid)
740 740
741 741 def getcwd(self):
742 742 return self.dirstate.getcwd()
743 743
744 744 def pathto(self, f, cwd=None):
745 745 return self.dirstate.pathto(f, cwd)
746 746
747 747 def wfile(self, f, mode='r'):
748 748 return self.wopener(f, mode)
749 749
750 750 def _link(self, f):
751 751 return self.wvfs.islink(f)
752 752
753 753 def _loadfilter(self, filter):
754 754 if filter not in self.filterpats:
755 755 l = []
756 756 for pat, cmd in self.ui.configitems(filter):
757 757 if cmd == '!':
758 758 continue
759 759 mf = matchmod.match(self.root, '', [pat])
760 760 fn = None
761 761 params = cmd
762 762 for name, filterfn in self._datafilters.iteritems():
763 763 if cmd.startswith(name):
764 764 fn = filterfn
765 765 params = cmd[len(name):].lstrip()
766 766 break
767 767 if not fn:
768 768 fn = lambda s, c, **kwargs: util.filter(s, c)
769 769 # Wrap old filters not supporting keyword arguments
770 770 if not inspect.getargspec(fn)[2]:
771 771 oldfn = fn
772 772 fn = lambda s, c, **kwargs: oldfn(s, c)
773 773 l.append((mf, fn, params))
774 774 self.filterpats[filter] = l
775 775 return self.filterpats[filter]
776 776
777 777 def _filter(self, filterpats, filename, data):
778 778 for mf, fn, cmd in filterpats:
779 779 if mf(filename):
780 780 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
781 781 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
782 782 break
783 783
784 784 return data
785 785
786 786 @unfilteredpropertycache
787 787 def _encodefilterpats(self):
788 788 return self._loadfilter('encode')
789 789
790 790 @unfilteredpropertycache
791 791 def _decodefilterpats(self):
792 792 return self._loadfilter('decode')
793 793
794 794 def adddatafilter(self, name, filter):
795 795 self._datafilters[name] = filter
796 796
797 797 def wread(self, filename):
798 798 if self._link(filename):
799 799 data = self.wvfs.readlink(filename)
800 800 else:
801 801 data = self.wopener.read(filename)
802 802 return self._filter(self._encodefilterpats, filename, data)
803 803
804 804 def wwrite(self, filename, data, flags):
805 805 data = self._filter(self._decodefilterpats, filename, data)
806 806 if 'l' in flags:
807 807 self.wopener.symlink(data, filename)
808 808 else:
809 809 self.wopener.write(filename, data)
810 810 if 'x' in flags:
811 811 self.wvfs.setflags(filename, False, True)
812 812
813 813 def wwritedata(self, filename, data):
814 814 return self._filter(self._decodefilterpats, filename, data)
815 815
816 def transaction(self, desc):
816 def transaction(self, desc, report=None):
817 817 tr = self._transref and self._transref() or None
818 818 if tr and tr.running():
819 819 return tr.nest()
820 820
821 821 # abort here if the journal already exists
822 822 if self.svfs.exists("journal"):
823 823 raise error.RepoError(
824 824 _("abandoned transaction found - run hg recover"))
825 825
826 826 self._writejournal(desc)
827 827 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
828
829 tr = transaction.transaction(self.ui.warn, self.sopener,
828 rp = report and report or self.ui.warn
829 tr = transaction.transaction(rp, self.sopener,
830 830 self.sjoin("journal"),
831 831 aftertrans(renames),
832 832 self.store.createmode)
833 833 self._transref = weakref.ref(tr)
834 834 return tr
835 835
836 836 def _journalfiles(self):
837 837 return ((self.svfs, 'journal'),
838 838 (self.vfs, 'journal.dirstate'),
839 839 (self.vfs, 'journal.branch'),
840 840 (self.vfs, 'journal.desc'),
841 841 (self.vfs, 'journal.bookmarks'),
842 842 (self.svfs, 'journal.phaseroots'))
843 843
844 844 def undofiles(self):
845 845 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
846 846
847 847 def _writejournal(self, desc):
848 848 self.opener.write("journal.dirstate",
849 849 self.opener.tryread("dirstate"))
850 850 self.opener.write("journal.branch",
851 851 encoding.fromlocal(self.dirstate.branch()))
852 852 self.opener.write("journal.desc",
853 853 "%d\n%s\n" % (len(self), desc))
854 854 self.opener.write("journal.bookmarks",
855 855 self.opener.tryread("bookmarks"))
856 856 self.sopener.write("journal.phaseroots",
857 857 self.sopener.tryread("phaseroots"))
858 858
859 859 def recover(self):
860 860 lock = self.lock()
861 861 try:
862 862 if self.svfs.exists("journal"):
863 863 self.ui.status(_("rolling back interrupted transaction\n"))
864 864 transaction.rollback(self.sopener, self.sjoin("journal"),
865 865 self.ui.warn)
866 866 self.invalidate()
867 867 return True
868 868 else:
869 869 self.ui.warn(_("no interrupted transaction available\n"))
870 870 return False
871 871 finally:
872 872 lock.release()
873 873
874 874 def rollback(self, dryrun=False, force=False):
875 875 wlock = lock = None
876 876 try:
877 877 wlock = self.wlock()
878 878 lock = self.lock()
879 879 if self.svfs.exists("undo"):
880 880 return self._rollback(dryrun, force)
881 881 else:
882 882 self.ui.warn(_("no rollback information available\n"))
883 883 return 1
884 884 finally:
885 885 release(lock, wlock)
886 886
887 887 @unfilteredmethod # Until we get smarter cache management
888 888 def _rollback(self, dryrun, force):
889 889 ui = self.ui
890 890 try:
891 891 args = self.opener.read('undo.desc').splitlines()
892 892 (oldlen, desc, detail) = (int(args[0]), args[1], None)
893 893 if len(args) >= 3:
894 894 detail = args[2]
895 895 oldtip = oldlen - 1
896 896
897 897 if detail and ui.verbose:
898 898 msg = (_('repository tip rolled back to revision %s'
899 899 ' (undo %s: %s)\n')
900 900 % (oldtip, desc, detail))
901 901 else:
902 902 msg = (_('repository tip rolled back to revision %s'
903 903 ' (undo %s)\n')
904 904 % (oldtip, desc))
905 905 except IOError:
906 906 msg = _('rolling back unknown transaction\n')
907 907 desc = None
908 908
909 909 if not force and self['.'] != self['tip'] and desc == 'commit':
910 910 raise util.Abort(
911 911 _('rollback of last commit while not checked out '
912 912 'may lose data'), hint=_('use -f to force'))
913 913
914 914 ui.status(msg)
915 915 if dryrun:
916 916 return 0
917 917
918 918 parents = self.dirstate.parents()
919 919 self.destroying()
920 920 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
921 921 if self.vfs.exists('undo.bookmarks'):
922 922 self.vfs.rename('undo.bookmarks', 'bookmarks')
923 923 if self.svfs.exists('undo.phaseroots'):
924 924 self.svfs.rename('undo.phaseroots', 'phaseroots')
925 925 self.invalidate()
926 926
927 927 parentgone = (parents[0] not in self.changelog.nodemap or
928 928 parents[1] not in self.changelog.nodemap)
929 929 if parentgone:
930 930 self.vfs.rename('undo.dirstate', 'dirstate')
931 931 try:
932 932 branch = self.opener.read('undo.branch')
933 933 self.dirstate.setbranch(encoding.tolocal(branch))
934 934 except IOError:
935 935 ui.warn(_('named branch could not be reset: '
936 936 'current branch is still \'%s\'\n')
937 937 % self.dirstate.branch())
938 938
939 939 self.dirstate.invalidate()
940 940 parents = tuple([p.rev() for p in self.parents()])
941 941 if len(parents) > 1:
942 942 ui.status(_('working directory now based on '
943 943 'revisions %d and %d\n') % parents)
944 944 else:
945 945 ui.status(_('working directory now based on '
946 946 'revision %d\n') % parents)
947 947 # TODO: if we know which new heads may result from this rollback, pass
948 948 # them to destroy(), which will prevent the branchhead cache from being
949 949 # invalidated.
950 950 self.destroyed()
951 951 return 0
952 952
953 953 def invalidatecaches(self):
954 954
955 955 if '_tagscache' in vars(self):
956 956 # can't use delattr on proxy
957 957 del self.__dict__['_tagscache']
958 958
959 959 self.unfiltered()._branchcaches.clear()
960 960 self.invalidatevolatilesets()
961 961
962 962 def invalidatevolatilesets(self):
963 963 self.filteredrevcache.clear()
964 964 obsolete.clearobscaches(self)
965 965
966 966 def invalidatedirstate(self):
967 967 '''Invalidates the dirstate, causing the next call to dirstate
968 968 to check if it was modified since the last time it was read,
969 969 rereading it if it has.
970 970
971 971 This is different from dirstate.invalidate() in that it doesn't
972 972 always reread the dirstate. Use dirstate.invalidate() if you want to
973 973 explicitly read the dirstate again (i.e. restoring it to a previous
974 974 known good state).'''
975 975 if hasunfilteredcache(self, 'dirstate'):
976 976 for k in self.dirstate._filecache:
977 977 try:
978 978 delattr(self.dirstate, k)
979 979 except AttributeError:
980 980 pass
981 981 delattr(self.unfiltered(), 'dirstate')
982 982
983 983 def invalidate(self):
984 984 unfiltered = self.unfiltered() # all file caches are stored unfiltered
985 985 for k in self._filecache:
986 986 # dirstate is invalidated separately in invalidatedirstate()
987 987 if k == 'dirstate':
988 988 continue
989 989
990 990 try:
991 991 delattr(unfiltered, k)
992 992 except AttributeError:
993 993 pass
994 994 self.invalidatecaches()
995 995
996 996 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
997 997 try:
998 998 l = lock.lock(lockname, 0, releasefn, desc=desc)
999 999 except error.LockHeld, inst:
1000 1000 if not wait:
1001 1001 raise
1002 1002 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1003 1003 (desc, inst.locker))
1004 1004 # default to 600 seconds timeout
1005 1005 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1006 1006 releasefn, desc=desc)
1007 1007 if acquirefn:
1008 1008 acquirefn()
1009 1009 return l
1010 1010
1011 1011 def _afterlock(self, callback):
1012 1012 """add a callback to the current repository lock.
1013 1013
1014 1014 The callback will be executed on lock release."""
1015 1015 l = self._lockref and self._lockref()
1016 1016 if l:
1017 1017 l.postrelease.append(callback)
1018 1018 else:
1019 1019 callback()
1020 1020
1021 1021 def lock(self, wait=True):
1022 1022 '''Lock the repository store (.hg/store) and return a weak reference
1023 1023 to the lock. Use this before modifying the store (e.g. committing or
1024 1024 stripping). If you are opening a transaction, get a lock as well.'''
1025 1025 l = self._lockref and self._lockref()
1026 1026 if l is not None and l.held:
1027 1027 l.lock()
1028 1028 return l
1029 1029
1030 1030 def unlock():
1031 1031 self.store.write()
1032 1032 if hasunfilteredcache(self, '_phasecache'):
1033 1033 self._phasecache.write()
1034 1034 for k, ce in self._filecache.items():
1035 1035 if k == 'dirstate' or k not in self.__dict__:
1036 1036 continue
1037 1037 ce.refresh()
1038 1038
1039 1039 l = self._lock(self.sjoin("lock"), wait, unlock,
1040 1040 self.invalidate, _('repository %s') % self.origroot)
1041 1041 self._lockref = weakref.ref(l)
1042 1042 return l
1043 1043
1044 1044 def wlock(self, wait=True):
1045 1045 '''Lock the non-store parts of the repository (everything under
1046 1046 .hg except .hg/store) and return a weak reference to the lock.
1047 1047 Use this before modifying files in .hg.'''
1048 1048 l = self._wlockref and self._wlockref()
1049 1049 if l is not None and l.held:
1050 1050 l.lock()
1051 1051 return l
1052 1052
1053 1053 def unlock():
1054 1054 self.dirstate.write()
1055 1055 self._filecache['dirstate'].refresh()
1056 1056
1057 1057 l = self._lock(self.join("wlock"), wait, unlock,
1058 1058 self.invalidatedirstate, _('working directory of %s') %
1059 1059 self.origroot)
1060 1060 self._wlockref = weakref.ref(l)
1061 1061 return l
1062 1062
1063 1063 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1064 1064 """
1065 1065 commit an individual file as part of a larger transaction
1066 1066 """
1067 1067
1068 1068 fname = fctx.path()
1069 1069 text = fctx.data()
1070 1070 flog = self.file(fname)
1071 1071 fparent1 = manifest1.get(fname, nullid)
1072 1072 fparent2 = fparent2o = manifest2.get(fname, nullid)
1073 1073
1074 1074 meta = {}
1075 1075 copy = fctx.renamed()
1076 1076 if copy and copy[0] != fname:
1077 1077 # Mark the new revision of this file as a copy of another
1078 1078 # file. This copy data will effectively act as a parent
1079 1079 # of this new revision. If this is a merge, the first
1080 1080 # parent will be the nullid (meaning "look up the copy data")
1081 1081 # and the second one will be the other parent. For example:
1082 1082 #
1083 1083 # 0 --- 1 --- 3 rev1 changes file foo
1084 1084 # \ / rev2 renames foo to bar and changes it
1085 1085 # \- 2 -/ rev3 should have bar with all changes and
1086 1086 # should record that bar descends from
1087 1087 # bar in rev2 and foo in rev1
1088 1088 #
1089 1089 # this allows this merge to succeed:
1090 1090 #
1091 1091 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1092 1092 # \ / merging rev3 and rev4 should use bar@rev2
1093 1093 # \- 2 --- 4 as the merge base
1094 1094 #
1095 1095
1096 1096 cfname = copy[0]
1097 1097 crev = manifest1.get(cfname)
1098 1098 newfparent = fparent2
1099 1099
1100 1100 if manifest2: # branch merge
1101 1101 if fparent2 == nullid or crev is None: # copied on remote side
1102 1102 if cfname in manifest2:
1103 1103 crev = manifest2[cfname]
1104 1104 newfparent = fparent1
1105 1105
1106 1106 # find source in nearest ancestor if we've lost track
1107 1107 if not crev:
1108 1108 self.ui.debug(" %s: searching for copy revision for %s\n" %
1109 1109 (fname, cfname))
1110 1110 for ancestor in self[None].ancestors():
1111 1111 if cfname in ancestor:
1112 1112 crev = ancestor[cfname].filenode()
1113 1113 break
1114 1114
1115 1115 if crev:
1116 1116 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1117 1117 meta["copy"] = cfname
1118 1118 meta["copyrev"] = hex(crev)
1119 1119 fparent1, fparent2 = nullid, newfparent
1120 1120 else:
1121 1121 self.ui.warn(_("warning: can't find ancestor for '%s' "
1122 1122 "copied from '%s'!\n") % (fname, cfname))
1123 1123
1124 1124 elif fparent2 != nullid:
1125 1125 # is one parent an ancestor of the other?
1126 1126 fparentancestor = flog.ancestor(fparent1, fparent2)
1127 1127 if fparentancestor == fparent1:
1128 1128 fparent1, fparent2 = fparent2, nullid
1129 1129 elif fparentancestor == fparent2:
1130 1130 fparent2 = nullid
1131 1131
1132 1132 # is the file changed?
1133 1133 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1134 1134 changelist.append(fname)
1135 1135 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1136 1136
1137 1137 # are just the flags changed during merge?
1138 1138 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1139 1139 changelist.append(fname)
1140 1140
1141 1141 return fparent1
1142 1142
1143 1143 @unfilteredmethod
1144 1144 def commit(self, text="", user=None, date=None, match=None, force=False,
1145 1145 editor=False, extra={}):
1146 1146 """Add a new revision to current repository.
1147 1147
1148 1148 Revision information is gathered from the working directory,
1149 1149 match can be used to filter the committed files. If editor is
1150 1150 supplied, it is called to get a commit message.
1151 1151 """
1152 1152
1153 1153 def fail(f, msg):
1154 1154 raise util.Abort('%s: %s' % (f, msg))
1155 1155
1156 1156 if not match:
1157 1157 match = matchmod.always(self.root, '')
1158 1158
1159 1159 if not force:
1160 1160 vdirs = []
1161 1161 match.explicitdir = vdirs.append
1162 1162 match.bad = fail
1163 1163
1164 1164 wlock = self.wlock()
1165 1165 try:
1166 1166 wctx = self[None]
1167 1167 merge = len(wctx.parents()) > 1
1168 1168
1169 1169 if (not force and merge and match and
1170 1170 (match.files() or match.anypats())):
1171 1171 raise util.Abort(_('cannot partially commit a merge '
1172 1172 '(do not specify files or patterns)'))
1173 1173
1174 1174 changes = self.status(match=match, clean=force)
1175 1175 if force:
1176 1176 changes[0].extend(changes[6]) # mq may commit unchanged files
1177 1177
1178 1178 # check subrepos
1179 1179 subs = []
1180 1180 commitsubs = set()
1181 1181 newstate = wctx.substate.copy()
1182 1182 # only manage subrepos and .hgsubstate if .hgsub is present
1183 1183 if '.hgsub' in wctx:
1184 1184 # we'll decide whether to track this ourselves, thanks
1185 1185 if '.hgsubstate' in changes[0]:
1186 1186 changes[0].remove('.hgsubstate')
1187 1187 if '.hgsubstate' in changes[2]:
1188 1188 changes[2].remove('.hgsubstate')
1189 1189
1190 1190 # compare current state to last committed state
1191 1191 # build new substate based on last committed state
1192 1192 oldstate = wctx.p1().substate
1193 1193 for s in sorted(newstate.keys()):
1194 1194 if not match(s):
1195 1195 # ignore working copy, use old state if present
1196 1196 if s in oldstate:
1197 1197 newstate[s] = oldstate[s]
1198 1198 continue
1199 1199 if not force:
1200 1200 raise util.Abort(
1201 1201 _("commit with new subrepo %s excluded") % s)
1202 1202 if wctx.sub(s).dirty(True):
1203 1203 if not self.ui.configbool('ui', 'commitsubrepos'):
1204 1204 raise util.Abort(
1205 1205 _("uncommitted changes in subrepo %s") % s,
1206 1206 hint=_("use --subrepos for recursive commit"))
1207 1207 subs.append(s)
1208 1208 commitsubs.add(s)
1209 1209 else:
1210 1210 bs = wctx.sub(s).basestate()
1211 1211 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1212 1212 if oldstate.get(s, (None, None, None))[1] != bs:
1213 1213 subs.append(s)
1214 1214
1215 1215 # check for removed subrepos
1216 1216 for p in wctx.parents():
1217 1217 r = [s for s in p.substate if s not in newstate]
1218 1218 subs += [s for s in r if match(s)]
1219 1219 if subs:
1220 1220 if (not match('.hgsub') and
1221 1221 '.hgsub' in (wctx.modified() + wctx.added())):
1222 1222 raise util.Abort(
1223 1223 _("can't commit subrepos without .hgsub"))
1224 1224 changes[0].insert(0, '.hgsubstate')
1225 1225
1226 1226 elif '.hgsub' in changes[2]:
1227 1227 # clean up .hgsubstate when .hgsub is removed
1228 1228 if ('.hgsubstate' in wctx and
1229 1229 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1230 1230 changes[2].insert(0, '.hgsubstate')
1231 1231
1232 1232 # make sure all explicit patterns are matched
1233 1233 if not force and match.files():
1234 1234 matched = set(changes[0] + changes[1] + changes[2])
1235 1235
1236 1236 for f in match.files():
1237 1237 f = self.dirstate.normalize(f)
1238 1238 if f == '.' or f in matched or f in wctx.substate:
1239 1239 continue
1240 1240 if f in changes[3]: # missing
1241 1241 fail(f, _('file not found!'))
1242 1242 if f in vdirs: # visited directory
1243 1243 d = f + '/'
1244 1244 for mf in matched:
1245 1245 if mf.startswith(d):
1246 1246 break
1247 1247 else:
1248 1248 fail(f, _("no match under directory!"))
1249 1249 elif f not in self.dirstate:
1250 1250 fail(f, _("file not tracked!"))
1251 1251
1252 1252 cctx = context.workingctx(self, text, user, date, extra, changes)
1253 1253
1254 1254 if (not force and not extra.get("close") and not merge
1255 1255 and not cctx.files()
1256 1256 and wctx.branch() == wctx.p1().branch()):
1257 1257 return None
1258 1258
1259 1259 if merge and cctx.deleted():
1260 1260 raise util.Abort(_("cannot commit merge with missing files"))
1261 1261
1262 1262 ms = mergemod.mergestate(self)
1263 1263 for f in changes[0]:
1264 1264 if f in ms and ms[f] == 'u':
1265 1265 raise util.Abort(_("unresolved merge conflicts "
1266 1266 "(see hg help resolve)"))
1267 1267
1268 1268 if editor:
1269 1269 cctx._text = editor(self, cctx, subs)
1270 1270 edited = (text != cctx._text)
1271 1271
1272 1272 # commit subs and write new state
1273 1273 if subs:
1274 1274 for s in sorted(commitsubs):
1275 1275 sub = wctx.sub(s)
1276 1276 self.ui.status(_('committing subrepository %s\n') %
1277 1277 subrepo.subrelpath(sub))
1278 1278 sr = sub.commit(cctx._text, user, date)
1279 1279 newstate[s] = (newstate[s][0], sr)
1280 1280 subrepo.writestate(self, newstate)
1281 1281
1282 1282 # Save commit message in case this transaction gets rolled back
1283 1283 # (e.g. by a pretxncommit hook). Leave the content alone on
1284 1284 # the assumption that the user will use the same editor again.
1285 1285 msgfn = self.savecommitmessage(cctx._text)
1286 1286
1287 1287 p1, p2 = self.dirstate.parents()
1288 1288 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1289 1289 try:
1290 1290 self.hook("precommit", throw=True, parent1=hookp1,
1291 1291 parent2=hookp2)
1292 1292 ret = self.commitctx(cctx, True)
1293 1293 except: # re-raises
1294 1294 if edited:
1295 1295 self.ui.write(
1296 1296 _('note: commit message saved in %s\n') % msgfn)
1297 1297 raise
1298 1298
1299 1299 # update bookmarks, dirstate and mergestate
1300 1300 bookmarks.update(self, [p1, p2], ret)
1301 1301 cctx.markcommitted(ret)
1302 1302 ms.reset()
1303 1303 finally:
1304 1304 wlock.release()
1305 1305
1306 1306 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1307 1307 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1308 1308 self._afterlock(commithook)
1309 1309 return ret
1310 1310
1311 1311 @unfilteredmethod
1312 1312 def commitctx(self, ctx, error=False):
1313 1313 """Add a new revision to current repository.
1314 1314 Revision information is passed via the context argument.
1315 1315 """
1316 1316
1317 1317 tr = lock = None
1318 1318 removed = list(ctx.removed())
1319 1319 p1, p2 = ctx.p1(), ctx.p2()
1320 1320 user = ctx.user()
1321 1321
1322 1322 lock = self.lock()
1323 1323 try:
1324 1324 tr = self.transaction("commit")
1325 1325 trp = weakref.proxy(tr)
1326 1326
1327 1327 if ctx.files():
1328 1328 m1 = p1.manifest().copy()
1329 1329 m2 = p2.manifest()
1330 1330
1331 1331 # check in files
1332 1332 new = {}
1333 1333 changed = []
1334 1334 linkrev = len(self)
1335 1335 for f in sorted(ctx.modified() + ctx.added()):
1336 1336 self.ui.note(f + "\n")
1337 1337 try:
1338 1338 fctx = ctx[f]
1339 1339 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1340 1340 changed)
1341 1341 m1.set(f, fctx.flags())
1342 1342 except OSError, inst:
1343 1343 self.ui.warn(_("trouble committing %s!\n") % f)
1344 1344 raise
1345 1345 except IOError, inst:
1346 1346 errcode = getattr(inst, 'errno', errno.ENOENT)
1347 1347 if error or errcode and errcode != errno.ENOENT:
1348 1348 self.ui.warn(_("trouble committing %s!\n") % f)
1349 1349 raise
1350 1350 else:
1351 1351 removed.append(f)
1352 1352
1353 1353 # update manifest
1354 1354 m1.update(new)
1355 1355 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1356 1356 drop = [f for f in removed if f in m1]
1357 1357 for f in drop:
1358 1358 del m1[f]
1359 1359 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1360 1360 p2.manifestnode(), (new, drop))
1361 1361 files = changed + removed
1362 1362 else:
1363 1363 mn = p1.manifestnode()
1364 1364 files = []
1365 1365
1366 1366 # update changelog
1367 1367 self.changelog.delayupdate()
1368 1368 n = self.changelog.add(mn, files, ctx.description(),
1369 1369 trp, p1.node(), p2.node(),
1370 1370 user, ctx.date(), ctx.extra().copy())
1371 1371 p = lambda: self.changelog.writepending() and self.root or ""
1372 1372 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1373 1373 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1374 1374 parent2=xp2, pending=p)
1375 1375 self.changelog.finalize(trp)
1376 1376 # set the new commit in its proper phase
1377 1377 targetphase = phases.newcommitphase(self.ui)
1378 1378 if targetphase:
1379 1379 # retracting the boundary does not alter parent changesets.
1380 1380 # if a parent has a higher phase, the resulting phase will
1381 1381 # be compliant anyway
1382 1382 #
1383 1383 # if minimal phase was 0 we don't need to retract anything
1384 1384 phases.retractboundary(self, targetphase, [n])
1385 1385 tr.close()
1386 1386 branchmap.updatecache(self.filtered('served'))
1387 1387 return n
1388 1388 finally:
1389 1389 if tr:
1390 1390 tr.release()
1391 1391 lock.release()
1392 1392
1393 1393 @unfilteredmethod
1394 1394 def destroying(self):
1395 1395 '''Inform the repository that nodes are about to be destroyed.
1396 1396 Intended for use by strip and rollback, so there's a common
1397 1397 place for anything that has to be done before destroying history.
1398 1398
1399 1399 This is mostly useful for saving state that is in memory and waiting
1400 1400 to be flushed when the current lock is released. Because a call to
1401 1401 destroyed is imminent, the repo will be invalidated causing those
1402 1402 changes to stay in memory (waiting for the next unlock), or vanish
1403 1403 completely.
1404 1404 '''
1405 1405 # When using the same lock to commit and strip, the phasecache is left
1406 1406 # dirty after committing. Then when we strip, the repo is invalidated,
1407 1407 # causing those changes to disappear.
1408 1408 if '_phasecache' in vars(self):
1409 1409 self._phasecache.write()
1410 1410
1411 1411 @unfilteredmethod
1412 1412 def destroyed(self):
1413 1413 '''Inform the repository that nodes have been destroyed.
1414 1414 Intended for use by strip and rollback, so there's a common
1415 1415 place for anything that has to be done after destroying history.
1416 1416 '''
1417 1417 # When one tries to:
1418 1418 # 1) destroy nodes thus calling this method (e.g. strip)
1419 1419 # 2) use phasecache somewhere (e.g. commit)
1420 1420 #
1421 1421 # then 2) will fail because the phasecache contains nodes that were
1422 1422 # removed. We can either remove phasecache from the filecache,
1423 1423 # causing it to reload next time it is accessed, or simply filter
1424 1424 # the removed nodes now and write the updated cache.
1425 1425 self._phasecache.filterunknown(self)
1426 1426 self._phasecache.write()
1427 1427
1428 1428 # update the 'served' branch cache to help read-only server processes
1429 1429 # Thanks to branchcache collaboration this is done from the nearest
1430 1430 # filtered subset and it is expected to be fast.
1431 1431 branchmap.updatecache(self.filtered('served'))
1432 1432
1433 1433 # Ensure the persistent tag cache is updated. Doing it now
1434 1434 # means that the tag cache only has to worry about destroyed
1435 1435 # heads immediately after a strip/rollback. That in turn
1436 1436 # guarantees that "cachetip == currenttip" (comparing both rev
1437 1437 # and node) always means no nodes have been added or destroyed.
1438 1438
1439 1439 # XXX this is suboptimal when qrefresh'ing: we strip the current
1440 1440 # head, refresh the tag cache, then immediately add a new head.
1441 1441 # But I think doing it this way is necessary for the "instant
1442 1442 # tag cache retrieval" case to work.
1443 1443 self.invalidate()
1444 1444
1445 1445 def walk(self, match, node=None):
1446 1446 '''
1447 1447 walk recursively through the directory tree or a given
1448 1448 changeset, finding all files matched by the match
1449 1449 function
1450 1450 '''
1451 1451 return self[node].walk(match)
1452 1452
1453 1453 def status(self, node1='.', node2=None, match=None,
1454 1454 ignored=False, clean=False, unknown=False,
1455 1455 listsubrepos=False):
1456 1456 """return status of files between two nodes or node and working
1457 1457 directory.
1458 1458
1459 1459 If node1 is None, use the first dirstate parent instead.
1460 1460 If node2 is None, compare node1 with working directory.
1461 1461 """
1462 1462
1463 1463 def mfmatches(ctx):
1464 1464 mf = ctx.manifest().copy()
1465 1465 if match.always():
1466 1466 return mf
1467 1467 for fn in mf.keys():
1468 1468 if not match(fn):
1469 1469 del mf[fn]
1470 1470 return mf
1471 1471
1472 1472 ctx1 = self[node1]
1473 1473 ctx2 = self[node2]
1474 1474
1475 1475 working = ctx2.rev() is None
1476 1476 parentworking = working and ctx1 == self['.']
1477 1477 match = match or matchmod.always(self.root, self.getcwd())
1478 1478 listignored, listclean, listunknown = ignored, clean, unknown
1479 1479
1480 1480 # load earliest manifest first for caching reasons
1481 1481 if not working and ctx2.rev() < ctx1.rev():
1482 1482 ctx2.manifest()
1483 1483
1484 1484 if not parentworking:
1485 1485 def bad(f, msg):
1486 1486 # 'f' may be a directory pattern from 'match.files()',
1487 1487 # so 'f not in ctx1' is not enough
1488 1488 if f not in ctx1 and f not in ctx1.dirs():
1489 1489 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1490 1490 match.bad = bad
1491 1491
1492 1492 if working: # we need to scan the working dir
1493 1493 subrepos = []
1494 1494 if '.hgsub' in self.dirstate:
1495 1495 subrepos = sorted(ctx2.substate)
1496 1496 s = self.dirstate.status(match, subrepos, listignored,
1497 1497 listclean, listunknown)
1498 1498 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1499 1499
1500 1500 # check for any possibly clean files
1501 1501 if parentworking and cmp:
1502 1502 fixup = []
1503 1503 # do a full compare of any files that might have changed
1504 1504 for f in sorted(cmp):
1505 1505 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1506 1506 or ctx1[f].cmp(ctx2[f])):
1507 1507 modified.append(f)
1508 1508 else:
1509 1509 fixup.append(f)
1510 1510
1511 1511 # update dirstate for files that are actually clean
1512 1512 if fixup:
1513 1513 if listclean:
1514 1514 clean += fixup
1515 1515
1516 1516 try:
1517 1517 # updating the dirstate is optional
1518 1518 # so we don't wait on the lock
1519 1519 wlock = self.wlock(False)
1520 1520 try:
1521 1521 for f in fixup:
1522 1522 self.dirstate.normal(f)
1523 1523 finally:
1524 1524 wlock.release()
1525 1525 except error.LockError:
1526 1526 pass
1527 1527
1528 1528 if not parentworking:
1529 1529 mf1 = mfmatches(ctx1)
1530 1530 if working:
1531 1531 # we are comparing working dir against non-parent
1532 1532 # generate a pseudo-manifest for the working dir
1533 1533 mf2 = mfmatches(self['.'])
1534 1534 for f in cmp + modified + added:
1535 1535 mf2[f] = None
1536 1536 mf2.set(f, ctx2.flags(f))
1537 1537 for f in removed:
1538 1538 if f in mf2:
1539 1539 del mf2[f]
1540 1540 else:
1541 1541 # we are comparing two revisions
1542 1542 deleted, unknown, ignored = [], [], []
1543 1543 mf2 = mfmatches(ctx2)
1544 1544
1545 1545 modified, added, clean = [], [], []
1546 1546 withflags = mf1.withflags() | mf2.withflags()
1547 1547 for fn, mf2node in mf2.iteritems():
1548 1548 if fn in mf1:
1549 1549 if (fn not in deleted and
1550 1550 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1551 1551 (mf1[fn] != mf2node and
1552 1552 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1553 1553 modified.append(fn)
1554 1554 elif listclean:
1555 1555 clean.append(fn)
1556 1556 del mf1[fn]
1557 1557 elif fn not in deleted:
1558 1558 added.append(fn)
1559 1559 removed = mf1.keys()
1560 1560
1561 1561 if working and modified and not self.dirstate._checklink:
1562 1562 # Symlink placeholders may get non-symlink-like contents
1563 1563 # via user error or dereferencing by NFS or Samba servers,
1564 1564 # so we filter out any placeholders that don't look like a
1565 1565 # symlink
1566 1566 sane = []
1567 1567 for f in modified:
1568 1568 if ctx2.flags(f) == 'l':
1569 1569 d = ctx2[f].data()
1570 1570 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1571 1571 self.ui.debug('ignoring suspect symlink placeholder'
1572 1572 ' "%s"\n' % f)
1573 1573 continue
1574 1574 sane.append(f)
1575 1575 modified = sane
1576 1576
1577 1577 r = modified, added, removed, deleted, unknown, ignored, clean
1578 1578
1579 1579 if listsubrepos:
1580 1580 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1581 1581 if working:
1582 1582 rev2 = None
1583 1583 else:
1584 1584 rev2 = ctx2.substate[subpath][1]
1585 1585 try:
1586 1586 submatch = matchmod.narrowmatcher(subpath, match)
1587 1587 s = sub.status(rev2, match=submatch, ignored=listignored,
1588 1588 clean=listclean, unknown=listunknown,
1589 1589 listsubrepos=True)
1590 1590 for rfiles, sfiles in zip(r, s):
1591 1591 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1592 1592 except error.LookupError:
1593 1593 self.ui.status(_("skipping missing subrepository: %s\n")
1594 1594 % subpath)
1595 1595
1596 1596 for l in r:
1597 1597 l.sort()
1598 1598 return r
1599 1599
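# Illustrative use of the status tuple (editor's sketch, not part of the
# original file); the seven lists come back in the order built into 'r'
# above, assuming the keyword arguments mirror the listignored /
# listclean / listunknown flags used by this method:
#
#     modified, added, removed, deleted, unknown, ignored, clean = \
#         repo.status(ignored=True, clean=True, unknown=True)
#     for f in modified:
#         print 'M', f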
1600 1600 def heads(self, start=None):
1601 1601 heads = self.changelog.heads(start)
1602 1602 # sort the output in rev descending order
1603 1603 return sorted(heads, key=self.changelog.rev, reverse=True)
1604 1604
1605 1605 def branchheads(self, branch=None, start=None, closed=False):
1606 1606 '''return a (possibly filtered) list of heads for the given branch
1607 1607
1608 1608 Heads are returned in topological order, from newest to oldest.
1609 1609 If branch is None, use the dirstate branch.
1610 1610 If start is not None, return only heads reachable from start.
1611 1611 If closed is True, return heads that are marked as closed as well.
1612 1612 '''
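# Illustrative usage (editor's sketch, not part of the original file);
# 'short' is the helper imported from the node module at the top:
#
#     for h in repo.branchheads('default', closed=True):
#         print short(h)    # newest first, including closed heads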
1613 1613 if branch is None:
1614 1614 branch = self[None].branch()
1615 1615 branches = self.branchmap()
1616 1616 if branch not in branches:
1617 1617 return []
1618 1618 # the cache returns heads ordered lowest to highest
1619 1619 bheads = list(reversed(branches[branch]))
1620 1620 if start is not None:
1621 1621 # filter out the heads that cannot be reached from startrev
1622 1622 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1623 1623 bheads = [h for h in bheads if h in fbheads]
1624 1624 if not closed:
1625 1625 bheads = [h for h in bheads if not self[h].closesbranch()]
1626 1626 return bheads
1627 1627
1628 1628 def branches(self, nodes):
1629 1629 if not nodes:
1630 1630 nodes = [self.changelog.tip()]
1631 1631 b = []
1632 1632 for n in nodes:
1633 1633 t = n
1634 1634 while True:
1635 1635 p = self.changelog.parents(n)
1636 1636 if p[1] != nullid or p[0] == nullid:
1637 1637 b.append((t, n, p[0], p[1]))
1638 1638 break
1639 1639 n = p[0]
1640 1640 return b
1641 1641
1642 1642 def between(self, pairs):
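# Editor's note: the walk below follows first parents from 'top' towards
# 'bottom', recording nodes at exponentially growing distances
# (1, 2, 4, 8, ...); this sparse sample of the chain is what the old
# wire-protocol discovery expects.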
1643 1643 r = []
1644 1644
1645 1645 for top, bottom in pairs:
1646 1646 n, l, i = top, [], 0
1647 1647 f = 1
1648 1648
1649 1649 while n != bottom and n != nullid:
1650 1650 p = self.changelog.parents(n)[0]
1651 1651 if i == f:
1652 1652 l.append(n)
1653 1653 f = f * 2
1654 1654 n = p
1655 1655 i += 1
1656 1656
1657 1657 r.append(l)
1658 1658
1659 1659 return r
1660 1660
1661 1661 def pull(self, remote, heads=None, force=False):
1662 1662 if remote.local():
1663 1663 missing = set(remote.requirements) - self.supported
1664 1664 if missing:
1665 1665 msg = _("required features are not"
1666 1666 " supported in the destination:"
1667 1667 " %s") % (', '.join(sorted(missing)))
1668 1668 raise util.Abort(msg)
1669 1669
1670 1670 # don't open a transaction for nothing or you break future useful
1671 1671 # rollback calls
1672 1672 tr = None
1673 1673 trname = 'pull\n' + util.hidepassword(remote.url())
1674 1674 lock = self.lock()
1675 1675 try:
1676 1676 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1677 1677 force=force)
1678 1678 common, fetch, rheads = tmp
1679 1679 if not fetch:
1680 1680 self.ui.status(_("no changes found\n"))
1681 1681 added = []
1682 1682 result = 0
1683 1683 else:
1684 1684 tr = self.transaction(trname)
1685 1685 if heads is None and list(common) == [nullid]:
1686 1686 self.ui.status(_("requesting all changes\n"))
1687 1687 elif heads is None and remote.capable('changegroupsubset'):
1688 1688 # issue1320, avoid a race if remote changed after discovery
1689 1689 heads = rheads
1690 1690
1691 1691 if remote.capable('getbundle'):
1692 1692 # TODO: get bundlecaps from remote
1693 1693 cg = remote.getbundle('pull', common=common,
1694 1694 heads=heads or rheads)
1695 1695 elif heads is None:
1696 1696 cg = remote.changegroup(fetch, 'pull')
1697 1697 elif not remote.capable('changegroupsubset'):
1698 1698 raise util.Abort(_("partial pull cannot be done because "
1699 1699 "other repository doesn't support "
1700 1700 "changegroupsubset."))
1701 1701 else:
1702 1702 cg = remote.changegroupsubset(fetch, heads, 'pull')
1703 1703 # we use an unfiltered changelog here because hidden revisions must
1704 1704 # be taken into account for phase synchronization; they may
1705 1705 # become public and visible again.
1706 1706 cl = self.unfiltered().changelog
1707 1707 clstart = len(cl)
1708 1708 result = self.addchangegroup(cg, 'pull', remote.url())
1709 1709 clend = len(cl)
1710 1710 added = [cl.node(r) for r in xrange(clstart, clend)]
1711 1711
1712 1712 # compute target subset
1713 1713 if heads is None:
1714 1714 # We pulled everything possible
1715 1715 # sync on everything common
1716 1716 subset = common + added
1717 1717 else:
1718 1718 # We pulled a specific subset
1719 1719 # sync on this subset
1720 1720 subset = heads
1721 1721
1722 1722 # Get remote phases data from remote
1723 1723 remotephases = remote.listkeys('phases')
1724 1724 publishing = bool(remotephases.get('publishing', False))
1725 1725 if remotephases and not publishing:
1726 1726 # remote is new and non-publishing
1727 1727 pheads, _dr = phases.analyzeremotephases(self, subset,
1728 1728 remotephases)
1729 1729 phases.advanceboundary(self, phases.public, pheads)
1730 1730 phases.advanceboundary(self, phases.draft, subset)
1731 1731 else:
1732 1732 # Remote is old or publishing; all common changesets
1733 1733 # should be seen as public
1734 1734 phases.advanceboundary(self, phases.public, subset)
1735 1735
1736 1736 def gettransaction():
1737 1737 if tr is None:
1738 1738 return self.transaction(trname)
1739 1739 return tr
1740 1740
1741 1741 obstr = obsolete.syncpull(self, remote, gettransaction)
1742 1742 if obstr is not None:
1743 1743 tr = obstr
1744 1744
1745 1745 if tr is not None:
1746 1746 tr.close()
1747 1747 finally:
1748 1748 if tr is not None:
1749 1749 tr.release()
1750 1750 lock.release()
1751 1751
1752 1752 return result
1753 1753
1754 1754 def checkpush(self, force, revs):
1755 1755 """Extensions can override this function if additional checks have
1756 1756 to be performed before pushing, or call it if they override push
1757 1757 command.
1758 1758 """
1759 1759 pass
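# Illustrative override (editor's sketch, not part of the original
# file): an extension could veto pushes from reposetup(); the
# 'veto.nopush' config knob below is hypothetical:
#
#     def reposetup(ui, repo):
#         class vetorepo(repo.__class__):
#             def checkpush(self, force, revs):
#                 super(vetorepo, self).checkpush(force, revs)
#                 if not force and ui.configbool('veto', 'nopush'):
#                     raise util.Abort('pushing is disabled by policy')
#         repo.__class__ = vetorepo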
1760 1760
1761 1761 def push(self, remote, force=False, revs=None, newbranch=False):
1762 1762 '''Push outgoing changesets (limited by revs) from the current
1763 1763 repository to remote. Return an integer:
1764 1764 - None means nothing to push
1765 1765 - 0 means HTTP error
1766 1766 - 1 means we pushed and remote head count is unchanged *or*
1767 1767 we have outgoing changesets but refused to push
1768 1768 - other values as described by addchangegroup()
1769 1769 '''
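# Illustrative handling of the return value (editor's sketch, not part
# of the original file):
#
#     ret = repo.push(other)
#     if ret is None:
#         ui.status('nothing to push\n')
#     elif ret == 0:
#         ui.warn('push failed over HTTP\n')
#     # any other value follows the addchangegroup() head-count codes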
1770 1770 if remote.local():
1771 1771 missing = set(self.requirements) - remote.local().supported
1772 1772 if missing:
1773 1773 msg = _("required features are not"
1774 1774 " supported in the destination:"
1775 1775 " %s") % (', '.join(sorted(missing)))
1776 1776 raise util.Abort(msg)
1777 1777
1778 1778 # there are two ways to push to remote repo:
1779 1779 #
1780 1780 # addchangegroup assumes local user can lock remote
1781 1781 # repo (local filesystem, old ssh servers).
1782 1782 #
1783 1783 # unbundle assumes local user cannot lock remote repo (new ssh
1784 1784 # servers, http servers).
1785 1785
1786 1786 if not remote.canpush():
1787 1787 raise util.Abort(_("destination does not support push"))
1788 1788 unfi = self.unfiltered()
1789 1789 def localphasemove(nodes, phase=phases.public):
1790 1790 """move <nodes> to <phase> in the local source repo"""
1791 1791 if locallock is not None:
1792 1792 phases.advanceboundary(self, phase, nodes)
1793 1793 else:
1794 1794 # repo is not locked, do not change any phases!
1795 1795 # Inform the user that phases should have been moved when
1796 1796 # applicable.
1797 1797 actualmoves = [n for n in nodes if phase < self[n].phase()]
1798 1798 phasestr = phases.phasenames[phase]
1799 1799 if actualmoves:
1800 1800 self.ui.status(_('cannot lock source repo, skipping local'
1801 1801 ' %s phase update\n') % phasestr)
1802 1802 # get local lock as we might write phase data
1803 1803 locallock = None
1804 1804 try:
1805 1805 locallock = self.lock()
1806 1806 except IOError, err:
1807 1807 if err.errno != errno.EACCES:
1808 1808 raise
1809 1809 # source repo cannot be locked.
1810 1810 # We do not abort the push, but just disable the local phase
1811 1811 # synchronisation.
1812 1812 msg = 'cannot lock source repository: %s\n' % err
1813 1813 self.ui.debug(msg)
1814 1814 try:
1815 1815 self.checkpush(force, revs)
1816 1816 lock = None
1817 1817 unbundle = remote.capable('unbundle')
1818 1818 if not unbundle:
1819 1819 lock = remote.lock()
1820 1820 try:
1821 1821 # discovery
1822 1822 fci = discovery.findcommonincoming
1823 1823 commoninc = fci(unfi, remote, force=force)
1824 1824 common, inc, remoteheads = commoninc
1825 1825 fco = discovery.findcommonoutgoing
1826 1826 outgoing = fco(unfi, remote, onlyheads=revs,
1827 1827 commoninc=commoninc, force=force)
1828 1828
1829 1829
1830 1830 if not outgoing.missing:
1831 1831 # nothing to push
1832 1832 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1833 1833 ret = None
1834 1834 else:
1835 1835 # something to push
1836 1836 if not force:
1837 1837 # if self.obsstore is empty --> there are no obsolete markers,
1838 1838 # so we can skip the iteration
1839 1839 if unfi.obsstore:
1840 1840 # these messages are assigned here for 80-char-limit reasons
1841 1841 mso = _("push includes obsolete changeset: %s!")
1842 1842 mst = "push includes %s changeset: %s!"
1843 1843 # plain versions for i18n tool to detect them
1844 1844 _("push includes unstable changeset: %s!")
1845 1845 _("push includes bumped changeset: %s!")
1846 1846 _("push includes divergent changeset: %s!")
1847 1847 # If we are about to push and there is at least one
1848 1848 # obsolete or unstable changeset in missing, then at
1849 1849 # least one of the missing heads will be obsolete or
1850 1850 # unstable. So checking heads only is enough.
1851 1851 for node in outgoing.missingheads:
1852 1852 ctx = unfi[node]
1853 1853 if ctx.obsolete():
1854 1854 raise util.Abort(mso % ctx)
1855 1855 elif ctx.troubled():
1856 1856 raise util.Abort(_(mst)
1857 1857 % (ctx.troubles()[0],
1858 1858 ctx))
1859 1859 discovery.checkheads(unfi, remote, outgoing,
1860 1860 remoteheads, newbranch,
1861 1861 bool(inc))
1862 1862
1863 1863 # TODO: get bundlecaps from remote
1864 1864 bundlecaps = None
1865 1865 # create a changegroup from local
1866 1866 if revs is None and not outgoing.excluded:
1867 1867 # push everything,
1868 1868 # use the fast path, no race possible on push
1869 1869 bundler = changegroup.bundle10(self, bundlecaps)
1870 1870 cg = self._changegroupsubset(outgoing,
1871 1871 bundler,
1872 1872 'push',
1873 1873 fastpath=True)
1874 1874 else:
1875 1875 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1876 1876
1877 1877 # apply changegroup to remote
1878 1878 if unbundle:
1879 1879 # local repo finds heads on server, finds out what
1880 1880 # revs it must push. Once revs are transferred, if the server
1881 1881 # finds it has different heads (someone else won the
1882 1882 # commit/push race), the server aborts.
1883 1883 if force:
1884 1884 remoteheads = ['force']
1885 1885 # ssh: return remote's addchangegroup()
1886 1886 # http: return remote's addchangegroup() or 0 for error
1887 1887 ret = remote.unbundle(cg, remoteheads, 'push')
1888 1888 else:
1889 1889 # we return an integer indicating remote head count
1890 1890 # change
1891 1891 ret = remote.addchangegroup(cg, 'push', self.url())
1892 1892
1893 1893 if ret:
1894 1894 # push succeeded, synchronize the target of the push
1895 1895 cheads = outgoing.missingheads
1896 1896 elif revs is None:
1897 1897 # All-out push failed; synchronize all common
1898 1898 cheads = outgoing.commonheads
1899 1899 else:
1900 1900 # I want cheads = heads(::missingheads and ::commonheads)
1901 1901 # (missingheads is revs with secret changeset filtered out)
1902 1902 #
1903 1903 # This can be expressed as:
1904 1904 # cheads = ( (missingheads and ::commonheads)
1905 1905 #          + (commonheads and ::missingheads)
1906 1906 # )
1907 1907 #
1908 1908 # while trying to push we already computed the following:
1909 1909 # common = (::commonheads)
1910 1910 # missing = ((commonheads::missingheads) - commonheads)
1911 1911 #
1912 1912 # We can pick:
1913 1913 # * missingheads part of common (::commonheads)
1914 1914 common = set(outgoing.common)
1915 1915 cheads = [node for node in revs if node in common]
1916 1916 # and
1917 1917 # * commonheads parents on missing
1918 1918 revset = unfi.set('%ln and parents(roots(%ln))',
1919 1919 outgoing.commonheads,
1920 1920 outgoing.missing)
1921 1921 cheads.extend(c.node() for c in revset)
1922 1922 # even when we don't push, exchanging phase data is useful
1923 1923 remotephases = remote.listkeys('phases')
1924 1924 if (self.ui.configbool('ui', '_usedassubrepo', False)
1925 1925 and remotephases # server supports phases
1926 1926 and ret is None # nothing was pushed
1927 1927 and remotephases.get('publishing', False)):
1928 1928 # When:
1929 1929 # - this is a subrepo push
1930 1930 # - and the remote supports phases
1931 1931 # - and no changeset was pushed
1932 1932 # - and the remote is publishing
1933 1933 # we may be in the issue 3871 case!
1934 1934 # We drop the courtesy phase synchronisation that would otherwise
1935 1935 # publish changesets which may still be draft locally
1936 1936 # on the remote.
1937 1937 remotephases = {'publishing': 'True'}
1938 1938 if not remotephases: # old server or public only repo
1939 1939 localphasemove(cheads)
1940 1940 # don't push any phase data as there is nothing to push
1941 1941 else:
1942 1942 ana = phases.analyzeremotephases(self, cheads, remotephases)
1943 1943 pheads, droots = ana
1944 1944 ### Apply remote phase on local
1945 1945 if remotephases.get('publishing', False):
1946 1946 localphasemove(cheads)
1947 1947 else: # publish = False
1948 1948 localphasemove(pheads)
1949 1949 localphasemove(cheads, phases.draft)
1950 1950 ### Apply local phase on remote
1951 1951
1952 1952 # Get the list of all revs that are draft on the remote but public here.
1953 1953 # XXX Beware that the revset breaks if droots is not strictly a set
1954 1954 # XXX of roots; we may want to ensure it is, but that is costly
1955 1955 outdated = unfi.set('heads((%ln::%ln) and public())',
1956 1956 droots, cheads)
1957 1957 for newremotehead in outdated:
1958 1958 r = remote.pushkey('phases',
1959 1959 newremotehead.hex(),
1960 1960 str(phases.draft),
1961 1961 str(phases.public))
1962 1962 if not r:
1963 1963 self.ui.warn(_('updating %s to public failed!\n')
1964 1964 % newremotehead)
1965 1965 self.ui.debug('try to push obsolete markers to remote\n')
1966 1966 obsolete.syncpush(self, remote)
1967 1967 finally:
1968 1968 if lock is not None:
1969 1969 lock.release()
1970 1970 finally:
1971 1971 if locallock is not None:
1972 1972 locallock.release()
1973 1973
1974 1974 self.ui.debug("checking for updated bookmarks\n")
1975 1975 rb = remote.listkeys('bookmarks')
1976 1976 revnums = map(unfi.changelog.rev, revs or [])
1977 1977 ancestors = [
1978 1978 a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
1979 1979 for k in rb.keys():
1980 1980 if k in unfi._bookmarks:
1981 1981 nr, nl = rb[k], hex(self._bookmarks[k])
1982 1982 if nr in unfi:
1983 1983 cr = unfi[nr]
1984 1984 cl = unfi[nl]
1985 1985 if bookmarks.validdest(unfi, cr, cl):
1986 1986 if ancestors and cl.rev() not in ancestors:
1987 1987 continue
1988 1988 r = remote.pushkey('bookmarks', k, nr, nl)
1989 1989 if r:
1990 1990 self.ui.status(_("updating bookmark %s\n") % k)
1991 1991 else:
1992 1992 self.ui.warn(_('updating bookmark %s'
1993 1993 ' failed!\n') % k)
1994 1994
1995 1995 return ret
1996 1996
1997 1997 def changegroupinfo(self, nodes, source):
1998 1998 if self.ui.verbose or source == 'bundle':
1999 1999 self.ui.status(_("%d changesets found\n") % len(nodes))
2000 2000 if self.ui.debugflag:
2001 2001 self.ui.debug("list of changesets:\n")
2002 2002 for node in nodes:
2003 2003 self.ui.debug("%s\n" % hex(node))
2004 2004
2005 2005 def changegroupsubset(self, bases, heads, source):
2006 2006 """Compute a changegroup consisting of all the nodes that are
2007 2007 descendants of any of the bases and ancestors of any of the heads.
2008 2008 Return a chunkbuffer object whose read() method will return
2009 2009 successive changegroup chunks.
2010 2010
2011 2011 It is fairly complex as determining which filenodes and which
2012 2012 manifest nodes need to be included for the changeset to be complete
2013 2013 is non-trivial.
2014 2014
2015 2015 Another wrinkle is doing the reverse, figuring out which changeset in
2016 2016 the changegroup a particular filenode or manifestnode belongs to.
2017 2017 """
2018 2018 cl = self.changelog
2019 2019 if not bases:
2020 2020 bases = [nullid]
2021 2021 # TODO: remove call to nodesbetween.
2022 2022 csets, bases, heads = cl.nodesbetween(bases, heads)
2023 2023 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
2024 2024 outgoing = discovery.outgoing(cl, bases, heads)
2025 2025 bundler = changegroup.bundle10(self)
2026 2026 return self._changegroupsubset(outgoing, bundler, source)
2027 2027
2028 2028 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2029 2029 """Like getbundle, but taking a discovery.outgoing as an argument.
2030 2030
2031 2031 This is only implemented for local repos and reuses potentially
2032 2032 precomputed sets in outgoing."""
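# Illustrative usage (editor's sketch, not part of the original file),
# reusing the sets computed by discovery as done in push() above:
#
#     out = discovery.findcommonoutgoing(repo, other)
#     cg = repo.getlocalbundle('push', out)    # None if nothing missing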
2033 2033 if not outgoing.missing:
2034 2034 return None
2035 2035 bundler = changegroup.bundle10(self, bundlecaps)
2036 2036 return self._changegroupsubset(outgoing, bundler, source)
2037 2037
2038 2038 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2039 2039 """Like changegroupsubset, but returns the set difference between the
2040 2040 ancestors of heads and the ancestors common.
2041 2041
2042 2042 If heads is None, use the local heads. If common is None, use [nullid].
2043 2043
2044 2044 The nodes in common might not all be known locally due to the way the
2045 2045 current discovery protocol works.
2046 2046 """
2047 2047 cl = self.changelog
2048 2048 if common:
2049 2049 hasnode = cl.hasnode
2050 2050 common = [n for n in common if hasnode(n)]
2051 2051 else:
2052 2052 common = [nullid]
2053 2053 if not heads:
2054 2054 heads = cl.heads()
2055 2055 return self.getlocalbundle(source,
2056 2056 discovery.outgoing(cl, common, heads),
2057 2057 bundlecaps=bundlecaps)
2058 2058
2059 2059 @unfilteredmethod
2060 2060 def _changegroupsubset(self, outgoing, bundler, source,
2061 2061 fastpath=False):
2062 2062 commonrevs = outgoing.common
2063 2063 csets = outgoing.missing
2064 2064 heads = outgoing.missingheads
2065 2065 # We go through the fast path if we get told to, or if all (unfiltered)
2066 2066 # heads have been requested (since we then know that all linkrevs will
2067 2067 # be pulled by the client).
2068 2068 heads.sort()
2069 2069 fastpathlinkrev = fastpath or (
2070 2070 self.filtername is None and heads == sorted(self.heads()))
2071 2071
2072 2072 self.hook('preoutgoing', throw=True, source=source)
2073 2073 self.changegroupinfo(csets, source)
2074 2074 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2075 2075 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2076 2076
2077 2077 def changegroup(self, basenodes, source):
2078 2078 # to avoid a race we use changegroupsubset() (issue1320)
2079 2079 return self.changegroupsubset(basenodes, self.heads(), source)
2080 2080
2081 2081 @unfilteredmethod
2082 2082 def addchangegroup(self, source, srctype, url, emptyok=False):
2083 2083 """Add the changegroup returned by source.read() to this repo.
2084 2084 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2085 2085 the URL of the repo where this changegroup is coming from.
2086 2086
2087 2087 Return an integer summarizing the change to this repo:
2088 2088 - nothing changed or no source: 0
2089 2089 - more heads than before: 1+added heads (2..n)
2090 2090 - fewer heads than before: -1-removed heads (-2..-n)
2091 2091 - number of heads stays the same: 1
2092 2092 """
2093 2093 def csmap(x):
2094 2094 self.ui.debug("add changeset %s\n" % short(x))
2095 2095 return len(cl)
2096 2096
2097 2097 def revmap(x):
2098 2098 return cl.rev(x)
2099 2099
2100 2100 if not source:
2101 2101 return 0
2102 2102
2103 2103 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2104 2104
2105 2105 changesets = files = revisions = 0
2106 2106 efiles = set()
2107 2107
2108 2108 # write changelog data to temp files so concurrent readers will not see
2109 2109 # an inconsistent view
2110 2110 cl = self.changelog
2111 2111 cl.delayupdate()
2112 2112 oldheads = cl.heads()
2113 2113
2114 2114 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2115 2115 try:
2116 2116 trp = weakref.proxy(tr)
2117 2117 # pull off the changeset group
2118 2118 self.ui.status(_("adding changesets\n"))
2119 2119 clstart = len(cl)
2120 2120 class prog(object):
2121 2121 step = _('changesets')
2122 2122 count = 1
2123 2123 ui = self.ui
2124 2124 total = None
2125 2125 def __call__(self):
2126 2126 self.ui.progress(self.step, self.count, unit=_('chunks'),
2127 2127 total=self.total)
2128 2128 self.count += 1
2129 2129 pr = prog()
2130 2130 source.callback = pr
2131 2131
2132 2132 source.changelogheader()
2133 2133 srccontent = cl.addgroup(source, csmap, trp)
2134 2134 if not (srccontent or emptyok):
2135 2135 raise util.Abort(_("received changelog group is empty"))
2136 2136 clend = len(cl)
2137 2137 changesets = clend - clstart
2138 2138 for c in xrange(clstart, clend):
2139 2139 efiles.update(self[c].files())
2140 2140 efiles = len(efiles)
2141 2141 self.ui.progress(_('changesets'), None)
2142 2142
2143 2143 # pull off the manifest group
2144 2144 self.ui.status(_("adding manifests\n"))
2145 2145 pr.step = _('manifests')
2146 2146 pr.count = 1
2147 2147 pr.total = changesets # manifests <= changesets
2148 2148 # no need to check for empty manifest group here:
2149 2149 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2150 2150 # no new manifest will be created and the manifest group will
2151 2151 # be empty during the pull
2152 2152 source.manifestheader()
2153 2153 self.manifest.addgroup(source, revmap, trp)
2154 2154 self.ui.progress(_('manifests'), None)
2155 2155
2156 2156 needfiles = {}
2157 2157 if self.ui.configbool('server', 'validate', default=False):
2158 2158 # validate incoming csets have their manifests
2159 2159 for cset in xrange(clstart, clend):
2160 2160 mfest = self.changelog.read(self.changelog.node(cset))[0]
2161 2161 mfest = self.manifest.readdelta(mfest)
2162 2162 # store file nodes we must see
2163 2163 for f, n in mfest.iteritems():
2164 2164 needfiles.setdefault(f, set()).add(n)
2165 2165
2166 2166 # process the files
2167 2167 self.ui.status(_("adding file changes\n"))
2168 2168 pr.step = _('files')
2169 2169 pr.count = 1
2170 2170 pr.total = efiles
2171 2171 source.callback = None
2172 2172
2173 2173 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2174 2174 pr, needfiles)
2175 2175 revisions += newrevs
2176 2176 files += newfiles
2177 2177
2178 2178 dh = 0
2179 2179 if oldheads:
2180 2180 heads = cl.heads()
2181 2181 dh = len(heads) - len(oldheads)
2182 2182 for h in heads:
2183 2183 if h not in oldheads and self[h].closesbranch():
2184 2184 dh -= 1
2185 2185 htext = ""
2186 2186 if dh:
2187 2187 htext = _(" (%+d heads)") % dh
2188 2188
2189 2189 self.ui.status(_("added %d changesets"
2190 2190 " with %d changes to %d files%s\n")
2191 2191 % (changesets, revisions, files, htext))
2192 2192 self.invalidatevolatilesets()
2193 2193
2194 2194 if changesets > 0:
2195 2195 p = lambda: cl.writepending() and self.root or ""
2196 2196 self.hook('pretxnchangegroup', throw=True,
2197 2197 node=hex(cl.node(clstart)), source=srctype,
2198 2198 url=url, pending=p)
2199 2199
2200 2200 added = [cl.node(r) for r in xrange(clstart, clend)]
2201 2201 publishing = self.ui.configbool('phases', 'publish', True)
2202 2202 if srctype == 'push':
2203 2203 # Old servers can not push the boundary themselves.
2204 2204 # New servers won't push the boundary if the changeset already
2205 2205 # existed locally as secret
2206 2206 #
2207 2207 # We should not use 'added' here but the list of all changes in
2208 2208 # the bundle
2209 2209 if publishing:
2210 2210 phases.advanceboundary(self, phases.public, srccontent)
2211 2211 else:
2212 2212 phases.advanceboundary(self, phases.draft, srccontent)
2213 2213 phases.retractboundary(self, phases.draft, added)
2214 2214 elif srctype != 'strip':
2215 2215 # publishing only alters behavior during push
2216 2216 #
2217 2217 # strip should not touch the boundary at all
2218 2218 phases.retractboundary(self, phases.draft, added)
2219 2219
2220 2220 # make changelog see real files again
2221 2221 cl.finalize(trp)
2222 2222
2223 2223 tr.close()
2224 2224
2225 2225 if changesets > 0:
2226 2226 if srctype != 'strip':
2227 2227 # During strip, the branchcache is invalid, but the coming call to
2228 2228 # `destroyed` will repair it.
2229 2229 # In other cases we can safely update the cache on disk.
2230 2230 branchmap.updatecache(self.filtered('served'))
2231 2231 def runhooks():
2232 2232 # forcefully update the on-disk branch cache
2233 2233 self.ui.debug("updating the branch cache\n")
2234 2234 self.hook("changegroup", node=hex(cl.node(clstart)),
2235 2235 source=srctype, url=url)
2236 2236
2237 2237 for n in added:
2238 2238 self.hook("incoming", node=hex(n), source=srctype,
2239 2239 url=url)
2240 2240
2241 2241 newheads = [h for h in self.heads() if h not in oldheads]
2242 2242 self.ui.log("incoming",
2243 2243 "%s incoming changes - new heads: %s\n",
2244 2244 len(added),
2245 2245 ', '.join([hex(c[:6]) for c in newheads]))
2246 2246 self._afterlock(runhooks)
2247 2247
2248 2248 finally:
2249 2249 tr.release()
2250 2250 # never return 0 here:
2251 2251 if dh < 0:
2252 2252 return dh - 1
2253 2253 else:
2254 2254 return dh + 1
2255 2255
2256 2256 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2257 2257 revisions = 0
2258 2258 files = 0
2259 2259 while True:
2260 2260 chunkdata = source.filelogheader()
2261 2261 if not chunkdata:
2262 2262 break
2263 2263 f = chunkdata["filename"]
2264 2264 self.ui.debug("adding %s revisions\n" % f)
2265 2265 pr()
2266 2266 fl = self.file(f)
2267 2267 o = len(fl)
2268 2268 if not fl.addgroup(source, revmap, trp):
2269 2269 raise util.Abort(_("received file revlog group is empty"))
2270 2270 revisions += len(fl) - o
2271 2271 files += 1
2272 2272 if f in needfiles:
2273 2273 needs = needfiles[f]
2274 2274 for new in xrange(o, len(fl)):
2275 2275 n = fl.node(new)
2276 2276 if n in needs:
2277 2277 needs.remove(n)
2278 2278 else:
2279 2279 raise util.Abort(
2280 2280 _("received spurious file revlog entry"))
2281 2281 if not needs:
2282 2282 del needfiles[f]
2283 2283 self.ui.progress(_('files'), None)
2284 2284
2285 2285 for f, needs in needfiles.iteritems():
2286 2286 fl = self.file(f)
2287 2287 for n in needs:
2288 2288 try:
2289 2289 fl.rev(n)
2290 2290 except error.LookupError:
2291 2291 raise util.Abort(
2292 2292 _('missing file data for %s:%s - run hg verify') %
2293 2293 (f, hex(n)))
2294 2294
2295 2295 return revisions, files
2296 2296
2297 2297 def stream_in(self, remote, requirements):
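# Wire format parsed below (editor's summary, inferred from this
# method): the server sends a status line (0 = ok, 1 = forbidden,
# 2 = could not lock), then '<total_files> <total_bytes>', then for
# each file a '<name>\0<size>' header followed by <size> bytes of raw
# store data.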
2298 2298 lock = self.lock()
2299 2299 try:
2300 2300 # Save remote branchmap. We will use it later
2301 2301 # to speed up branchcache creation
2302 2302 rbranchmap = None
2303 2303 if remote.capable("branchmap"):
2304 2304 rbranchmap = remote.branchmap()
2305 2305
2306 2306 fp = remote.stream_out()
2307 2307 l = fp.readline()
2308 2308 try:
2309 2309 resp = int(l)
2310 2310 except ValueError:
2311 2311 raise error.ResponseError(
2312 2312 _('unexpected response from remote server:'), l)
2313 2313 if resp == 1:
2314 2314 raise util.Abort(_('operation forbidden by server'))
2315 2315 elif resp == 2:
2316 2316 raise util.Abort(_('locking the remote repository failed'))
2317 2317 elif resp != 0:
2318 2318 raise util.Abort(_('the server sent an unknown error code'))
2319 2319 self.ui.status(_('streaming all changes\n'))
2320 2320 l = fp.readline()
2321 2321 try:
2322 2322 total_files, total_bytes = map(int, l.split(' ', 1))
2323 2323 except (ValueError, TypeError):
2324 2324 raise error.ResponseError(
2325 2325 _('unexpected response from remote server:'), l)
2326 2326 self.ui.status(_('%d files to transfer, %s of data\n') %
2327 2327 (total_files, util.bytecount(total_bytes)))
2328 2328 handled_bytes = 0
2329 2329 self.ui.progress(_('clone'), 0, total=total_bytes)
2330 2330 start = time.time()
2331 2331 for i in xrange(total_files):
2332 2332 # XXX doesn't support '\n' or '\r' in filenames
2333 2333 l = fp.readline()
2334 2334 try:
2335 2335 name, size = l.split('\0', 1)
2336 2336 size = int(size)
2337 2337 except (ValueError, TypeError):
2338 2338 raise error.ResponseError(
2339 2339 _('unexpected response from remote server:'), l)
2340 2340 if self.ui.debugflag:
2341 2341 self.ui.debug('adding %s (%s)\n' %
2342 2342 (name, util.bytecount(size)))
2343 2343 # for backwards compat, name was partially encoded
2344 2344 ofp = self.sopener(store.decodedir(name), 'w')
2345 2345 for chunk in util.filechunkiter(fp, limit=size):
2346 2346 handled_bytes += len(chunk)
2347 2347 self.ui.progress(_('clone'), handled_bytes,
2348 2348 total=total_bytes)
2349 2349 ofp.write(chunk)
2350 2350 ofp.close()
2351 2351 elapsed = time.time() - start
2352 2352 if elapsed <= 0:
2353 2353 elapsed = 0.001
2354 2354 self.ui.progress(_('clone'), None)
2355 2355 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2356 2356 (util.bytecount(total_bytes), elapsed,
2357 2357 util.bytecount(total_bytes / elapsed)))
2358 2358
2359 2359 # new requirements = old non-format requirements +
2360 2360 # new format-related requirements
2361 2361 # from the streamed-in repository
2362 2362 requirements.update(set(self.requirements) - self.supportedformats)
2363 2363 self._applyrequirements(requirements)
2364 2364 self._writerequirements()
2365 2365
2366 2366 if rbranchmap:
2367 2367 rbheads = []
2368 2368 for bheads in rbranchmap.itervalues():
2369 2369 rbheads.extend(bheads)
2370 2370
2371 2371 if rbheads:
2372 2372 rtiprev = max((int(self.changelog.rev(node))
2373 2373 for node in rbheads))
2374 2374 cache = branchmap.branchcache(rbranchmap,
2375 2375 self[rtiprev].node(),
2376 2376 rtiprev)
2377 2377 # Try to stick it as low as possible
2378 2378 # filters above 'served' are unlikely to be fetched from a clone
2379 2379 for candidate in ('base', 'immutable', 'served'):
2380 2380 rview = self.filtered(candidate)
2381 2381 if cache.validfor(rview):
2382 2382 self._branchcaches[candidate] = cache
2383 2383 cache.write(rview)
2384 2384 break
2385 2385 self.invalidate()
2386 2386 return len(self.heads()) + 1
2387 2387 finally:
2388 2388 lock.release()
2389 2389
2390 2390 def clone(self, remote, heads=[], stream=False):
2391 2391 '''clone remote repository.
2392 2392
2393 2393 keyword arguments:
2394 2394 heads: list of revs to clone (forces use of pull)
2395 2395 stream: use streaming clone if possible'''
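# Illustrative usage (editor's sketch, not part of the original file);
# 'other' is an assumed peer:
#
#     repo.clone(other, stream=True)         # stream when the server allows
#     repo.clone(other, heads=[somenode])    # explicit heads force a pull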
2396 2396
2397 2397 # now, all clients that can request uncompressed clones can
2398 2398 # read repo formats supported by all servers that can serve
2399 2399 # them.
2400 2400
2401 2401 # if revlog format changes, client will have to check version
2402 2402 # and format flags on "stream" capability, and use
2403 2403 # uncompressed only if compatible.
2404 2404
2405 2405 if not stream:
2406 2406 # if the server explicitly prefers to stream (for fast LANs)
2407 2407 stream = remote.capable('stream-preferred')
2408 2408
2409 2409 if stream and not heads:
2410 2410 # 'stream' means remote revlog format is revlogv1 only
2411 2411 if remote.capable('stream'):
2412 2412 return self.stream_in(remote, set(('revlogv1',)))
2413 2413 # otherwise, 'streamreqs' contains the remote revlog format
2414 2414 streamreqs = remote.capable('streamreqs')
2415 2415 if streamreqs:
2416 2416 streamreqs = set(streamreqs.split(','))
2417 2417 # if we support it, stream in and adjust our requirements
2418 2418 if not streamreqs - self.supportedformats:
2419 2419 return self.stream_in(remote, streamreqs)
2420 2420 return self.pull(remote, heads)
2421 2421
2422 2422 def pushkey(self, namespace, key, old, new):
2423 2423 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2424 2424 old=old, new=new)
2425 2425 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2426 2426 ret = pushkey.push(self, namespace, key, old, new)
2427 2427 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2428 2428 ret=ret)
2429 2429 return ret
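# Illustrative usage (editor's sketch, not part of the original file):
# creating a bookmark on this repo via the pushkey protocol, where an
# empty 'old' value means the key did not exist yet:
#
#     ok = repo.pushkey('bookmarks', 'stable', '', hex(newnode))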
2430 2430
2431 2431 def listkeys(self, namespace):
2432 2432 self.hook('prelistkeys', throw=True, namespace=namespace)
2433 2433 self.ui.debug('listing keys for "%s"\n' % namespace)
2434 2434 values = pushkey.list(self, namespace)
2435 2435 self.hook('listkeys', namespace=namespace, values=values)
2436 2436 return values
2437 2437
2438 2438 def debugwireargs(self, one, two, three=None, four=None, five=None):
2439 2439 '''used to test argument passing over the wire'''
2440 2440 return "%s %s %s %s %s" % (one, two, three, four, five)
2441 2441
2442 2442 def savecommitmessage(self, text):
2443 2443 fp = self.opener('last-message.txt', 'wb')
2444 2444 try:
2445 2445 fp.write(text)
2446 2446 finally:
2447 2447 fp.close()
2448 2448 return self.pathto(fp.name[len(self.root) + 1:])
2449 2449
2450 2450 # used to avoid circular references so destructors work
2451 2451 def aftertrans(files):
2452 2452 renamefiles = [tuple(t) for t in files]
2453 2453 def a():
2454 2454 for vfs, src, dest in renamefiles:
2455 2455 try:
2456 2456 vfs.rename(src, dest)
2457 2457 except OSError: # journal file does not yet exist
2458 2458 pass
2459 2459 return a
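# Illustrative usage (editor's sketch, not part of the original file):
# the callable built above is invoked once a transaction is safely
# closed, renaming journal files to their undo counterparts:
#
#     onclose = aftertrans([(vfs, 'journal', 'undo')])
#     onclose()    # rename journal -> undo; missing files are ignored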
2460 2460
2461 2461 def undoname(fn):
2462 2462 base, name = os.path.split(fn)
2463 2463 assert name.startswith('journal')
2464 2464 return os.path.join(base, name.replace('journal', 'undo', 1))
2465 2465
2466 2466 def instance(ui, path, create):
2467 2467 return localrepository(ui, util.urllocalpath(path), create)
2468 2468
2469 2469 def islocal(path):
2470 2470 return True