##// END OF EJS Templates
bundle2-localpeer: properly propagate the server output on error (issue4594)...
Pierre-Yves David -
r24799:d99d7e3f default
parent child Browse files
Show More
@@ -1,1940 +1,1955 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
# Short local aliases for the cache decorators used throughout this module.
propertycache = util.propertycache
filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """A filecache that always operates on the unfiltered repository.

    All filecache usage on repo is done for logic that should be
    unfiltered, so every descriptor access is redirected to
    ``repo.unfiltered()`` before delegating to ``filecache``.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)
    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)
    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # store files live under .hg/store, so join via sjoin instead of join
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that applies to the unfiltered repo only.

    The cached value is computed and stored on the unfiltered repository;
    filtered views simply read it back from there.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # accessed through a filtered view: fetch the value cached on
            # the unfiltered repo (propertycache stores it under self.name)
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store on the view object itself (not the unfiltered repo), so each
        # filtered view keeps its own cached value
        object.__setattr__(obj, self.name, value)
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfivars = vars(repo.unfiltered())
    return name in unfivars
60 60
def unfilteredmethod(orig):
    """decorate a method so it always runs on the unfiltered repository"""
    def wrapper(repo, *args, **kwargs):
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66 66
# Capabilities advertised by a modern local peer; legacy peers additionally
# support the pre-getbundle 'changegroupsubset' call.
moderncaps = set(['lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'])
legacycaps = moderncaps | set(['changegroupsubset'])
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        # NOTE(review): `caps` defaults to the shared module-level set;
        # _restrictcapabilities appears to copy it before mutating — confirm.
        peer.peerrepository.__init__(self)
        # expose the 'served' view so a local peer hides the same changesets
        # a real server would (e.g. secret/hidden ones)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer exposes its underlying repository
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception, exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced, exc:
            # translate the low-level race into a wire-style response error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
158 173
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (adds 'changegroupsubset')
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
177 192
class localrepository(object):

    # on-disk storage formats this class can handle; also exported through
    # localpeer.supportedformats
    supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
    # everything a repo may require: storage formats plus layout features
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that are forwarded to the store opener as options
    openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
    # default requirements for newly created repositories
    requirements = ['revlogv1']
    # name of the repoview filter; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # return a private copy so callers may append without mutating the
        # class-level default
        return self.requirements[:]
193 208
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at `path`.

        Raises error.RepoError when the repo is missing (create=False) or
        already exists (create=True).
        """
        # working directory vfs and derived paths
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # per-repo config may enable additional extensions
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run setup funcs registered by currently enabled
                # extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    # fncache/dotencode only make sense on top of the store
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                if self.ui.configbool('experimental', 'manifestv2', False):
                    requirements.append("manifestv2")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                # a missing requires file means a legacy (empty-requirements)
                # repo; any other I/O error is fatal
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            # shared repos record the real store location in .hg/sharedpath
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        # weakrefs to the active transaction/locks (see currenttransaction)
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
309 324
    def close(self):
        # flush in-memory caches (e.g. the rev-branch cache) to disk
        self._writecaches()
312 327
    def _writecaches(self):
        # only write the rev-branch cache if it was ever instantiated
        if self._revbranchcache:
            self._revbranchcache.write()
316 331
    def _restrictcapabilities(self, caps):
        """Return the capability set to advertise for this repo.

        Adds the URL-quoted bundle2 capability blob unless advertising is
        disabled via experimental.bundle2-advertise.
        """
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)  # copy: never mutate the caller's set
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urllib.quote(capsblob))
        return caps
323 338
    def _applyrequirements(self, requirements):
        """Record `requirements` and derive the store-opener options.

        Only requirements listed in `openerreqs` are forwarded to the store
        vfs; the remaining options are tuning knobs read from config.
        """
        self.requirements = requirements
        self.svfs.options = dict((r, 1) for r in requirements
                                 if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
        # NOTE(review): the `is not None` guard suggests configbool can
        # return None when the key is unset — confirm against ui.configbool.
        if usetreemanifest is not None:
            self.svfs.options['usetreemanifest'] = usetreemanifest
340 355
341 356 def _writerequirements(self):
342 357 reqfile = self.vfs("requires", "w")
343 358 for r in sorted(self.requirements):
344 359 reqfile.write("%s\n" % r)
345 360 reqfile.close()
346 361
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # walk prefixes from longest to shortest, looking for a subrepo
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path lies inside the subrepo: let it decide recursively
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
384 399
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle
387 402
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
393 408
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
401 416
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark name -> node mapping, reloaded when .hg/bookmarks changes
        return bookmarks.bmstore(self)
405 420
    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark (or None), from .hg/bookmarks.current
        return bookmarks.readcurrent(self)
409 424
410 425 def bookmarkheads(self, bookmark):
411 426 name = bookmark.split('@', 1)[0]
412 427 heads = []
413 428 for mark, n in self._bookmarks.iteritems():
414 429 if mark.split('@', 1)[0] == name:
415 430 heads.append(n)
416 431 return heads
417 432
    @storecache('phaseroots')
    def _phasecache(self):
        # phase information, invalidated when .hg/store/phaseroots changes
        return phases.phasecache(self, self._phasedefaults)
421 436
    @storecache('obsstore')
    def obsstore(self):
        """The obsolescence-marker store (read-only if the feature is off)."""
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist but the feature is disabled: warn, don't fail
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
438 453
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # a hook is running inside a transaction: also read the pending
            # (not yet committed) changelog data for this repo
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
447 462
    @storecache('00manifest.i')
    def manifest(self):
        # the manifest revlog, invalidated when 00manifest.i changes
        return manifest.manifest(self.svfs)
451 466
    @repofilecache('dirstate')
    def dirstate(self):
        """The working-directory state.

        Parents unknown to the changelog are mapped to nullid (warning only
        once) instead of aborting.
        """
        warned = [0]  # one-element list so the closure can mutate the flag
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
467 482
    def __getitem__(self, changeid):
        """repo[changeid] -> context.

        None yields the working context; a slice yields the matching
        non-filtered changectxs; anything else is a single changectx.
        """
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
476 491
    def __contains__(self, changeid):
        # EAFP: resolving the changeid is the authoritative membership test
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False
483 498
    def __nonzero__(self):
        # a repo object is always truthy, even when it has no changesets
        return True
486 501
    def __len__(self):
        # number of revisions in the changelog
        return len(self.changelog)
489 504
    def __iter__(self):
        # iterate revision numbers
        return iter(self.changelog)
492 507
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
498 513
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
506 521
    def url(self):
        # local repositories are addressed with the 'file:' scheme
        return 'file:' + self.root
509 524
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
518 533
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        """Low-level tagging: write tag entries and (if not local) commit.

        NOTE(review): `extra={}` is a mutable default; it is not mutated here
        (only forwarded to commit) but replacing it with None would be safer.

        Runs 'pretag' hooks (which may veto) before writing, and 'tag' hooks
        after. Returns the commit node for global tags, None for local ones.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append entries at EOF, making sure the previous content ends
            # with a newline
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # moving an existing tag: record the old node first so
                    # history of the tag is preserved in the file
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
592 607
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit on top of a dirty .hgtags
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
622 637
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # lazily-filled caches: node -> [tags] and rev-ordered list
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
645 660
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # a filtered view cannot reuse the cache computed on the full
            # repo: recompute from scratch
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
661 676
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
694 709
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
705 720
706 721 def tagslist(self):
707 722 '''return a list of tags ordered by revision'''
708 723 if not self._tagscache.tagslist:
709 724 l = []
710 725 for t, n in self.tags().iteritems():
711 726 l.append((self.changelog.rev(n), t, n))
712 727 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
713 728
714 729 return self._tagscache.tagslist
715 730
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # build the reverse (node -> sorted tag list) mapping lazily
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
726 741
727 742 def nodebookmarks(self, node):
728 743 marks = []
729 744 for bookmark, n in self._bookmarks.iteritems():
730 745 if n == node:
731 746 marks.append(bookmark)
732 747 return sorted(marks)
733 748
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # refresh the per-filter cache, then return the entry for this view
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
739 754
    @unfilteredmethod
    def revbranchcache(self):
        # lazily create the rev -> branch cache; shared by all filter views
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
745 760
746 761 def branchtip(self, branch, ignoremissing=False):
747 762 '''return the tip node for a given branch
748 763
749 764 If ignoremissing is True, then this method will not raise an error.
750 765 This is helpful for callers that only expect None for a missing branch
751 766 (e.g. namespace).
752 767
753 768 '''
754 769 try:
755 770 return self.branchmap().branchtip(branch)
756 771 except KeyError:
757 772 if not ignoremissing:
758 773 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
759 774 else:
760 775 pass
761 776
    def lookup(self, key):
        # resolve any changeid (rev, hash, tag, bookmark, ...) to a node
        return self[key].node()
764 779
765 780 def lookupbranch(self, key, remote=None):
766 781 repo = remote or self
767 782 if key in repo.branchmap():
768 783 return key
769 784
770 785 repo = (remote and remote.local()) and remote or self
771 786 return repo[key].branch()
772 787
773 788 def known(self, nodes):
774 789 nm = self.changelog.nodemap
775 790 pc = self._phasecache
776 791 result = []
777 792 for n in nodes:
778 793 r = nm.get(n)
779 794 resp = not (r is None or pc.phase(self, r) >= phases.secret)
780 795 result.append(resp)
781 796 return result
782 797
    def local(self):
        # this repo is local; remote peers return a falsy value here
        return self
785 800
    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
794 809
    def shared(self):
        '''the type of shared repository (None if not shared)'''
        # sharedpath differs from path only when .hg/sharedpath redirected us
        if self.sharedpath != self.path:
            return 'store'
        return None
800 815
    def join(self, f, *insidef):
        # path inside .hg/
        return self.vfs.join(os.path.join(f, *insidef))
803 818
    def wjoin(self, f, *insidef):
        # path inside the working directory
        return self.vfs.reljoin(self.root, f, *insidef)
806 821
    def file(self, f):
        # filelog paths are store-relative; strip a single leading '/'
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
811 826
    def changectx(self, changeid):
        # explicit-name alias for repo[changeid]
        return self[changeid]
814 829
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
818 833
    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, fixing up copy records.

        The dirstate cannot adjust copies itself because that requires
        access to the parents' manifests.
        """
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # collapsing to a single parent: drop copy records whose source
            # and destination are both unknown to that parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
835 850
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
840 855
    def getcwd(self):
        # current working directory, relative to the repo root
        return self.dirstate.getcwd()
843 858
    def pathto(self, f, cwd=None):
        # repo-relative path f rendered relative to cwd
        return self.dirstate.pathto(f, cwd)
846 861
    def wfile(self, f, mode='r'):
        # open a file in the working directory
        return self.wvfs(f, mode)
849 864
    def _link(self, f):
        # is the working-directory file a symlink?
        return self.wvfs.islink(f)
852 867
    def _loadfilter(self, filter):
        """Load and cache the (matcher, fn, params) list for an encode/decode
        filter section; `filter` is the config section name."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables the pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # a registered data filter takes precedence over shelling out
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running `cmd` as an external filter;
                    # binding c=cmd happens at call time via the loop variable
                    # captured below being re-wrapped per iteration
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
876 891
    def _filter(self, filterpats, filename, data):
        # apply the first filter whose pattern matches the filename
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
885 900
886 901 @unfilteredpropertycache
887 902 def _encodefilterpats(self):
888 903 return self._loadfilter('encode')
889 904
890 905 @unfilteredpropertycache
891 906 def _decodefilterpats(self):
892 907 return self._loadfilter('decode')
893 908
894 909 def adddatafilter(self, name, filter):
895 910 self._datafilters[name] = filter
896 911
897 912 def wread(self, filename):
898 913 if self._link(filename):
899 914 data = self.wvfs.readlink(filename)
900 915 else:
901 916 data = self.wvfs.read(filename)
902 917 return self._filter(self._encodefilterpats, filename, data)
903 918
904 919 def wwrite(self, filename, data, flags):
905 920 data = self._filter(self._decodefilterpats, filename, data)
906 921 if 'l' in flags:
907 922 self.wvfs.symlink(data, filename)
908 923 else:
909 924 self.wvfs.write(filename, data)
910 925 if 'x' in flags:
911 926 self.wvfs.setflags(filename, False, True)
912 927
913 928 def wwritedata(self, filename, data):
914 929 return self._filter(self._decodefilterpats, filename, data)
915 930
916 931 def currenttransaction(self):
917 932 """return the current transaction or None if non exists"""
918 933 if self._transref:
919 934 tr = self._transref()
920 935 else:
921 936 tr = None
922 937
923 938 if tr and tr.running():
924 939 return tr
925 940 return None
926 941
927 942 def transaction(self, desc, report=None):
928 943 if (self.ui.configbool('devel', 'all')
929 944 or self.ui.configbool('devel', 'check-locks')):
930 945 l = self._lockref and self._lockref()
931 946 if l is None or not l.held:
932 947 scmutil.develwarn(self.ui, 'transaction with no lock')
933 948 tr = self.currenttransaction()
934 949 if tr is not None:
935 950 return tr.nest()
936 951
937 952 # abort here if the journal already exists
938 953 if self.svfs.exists("journal"):
939 954 raise error.RepoError(
940 955 _("abandoned transaction found"),
941 956 hint=_("run 'hg recover' to clean up transaction"))
942 957
943 958 self.hook('pretxnopen', throw=True, txnname=desc)
944 959
945 960 self._writejournal(desc)
946 961 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
947 962 if report:
948 963 rp = report
949 964 else:
950 965 rp = self.ui.warn
951 966 vfsmap = {'plain': self.vfs} # root of .hg/
952 967 # we must avoid cyclic reference between repo and transaction.
953 968 reporef = weakref.ref(self)
954 969 def validate(tr):
955 970 """will run pre-closing hooks"""
956 971 pending = lambda: tr.writepending() and self.root or ""
957 972 reporef().hook('pretxnclose', throw=True, pending=pending,
958 973 xnname=desc, **tr.hookargs)
959 974
960 975 tr = transaction.transaction(rp, self.sopener, vfsmap,
961 976 "journal",
962 977 "undo",
963 978 aftertrans(renames),
964 979 self.store.createmode,
965 980 validator=validate)
966 981
967 982 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
968 983 tr.hookargs['TXNID'] = trid
969 984 # note: writing the fncache only during finalize mean that the file is
970 985 # outdated when running hooks. As fncache is used for streaming clone,
971 986 # this is not expected to break anything that happen during the hooks.
972 987 tr.addfinalize('flush-fncache', self.store.write)
973 988 def txnclosehook(tr2):
974 989 """To be run if transaction is successful, will schedule a hook run
975 990 """
976 991 def hook():
977 992 reporef().hook('txnclose', throw=False, txnname=desc,
978 993 **tr2.hookargs)
979 994 reporef()._afterlock(hook)
980 995 tr.addfinalize('txnclose-hook', txnclosehook)
981 996 def txnaborthook(tr2):
982 997 """To be run if transaction is aborted
983 998 """
984 999 reporef().hook('txnabort', throw=False, txnname=desc,
985 1000 **tr2.hookargs)
986 1001 tr.addabort('txnabort-hook', txnaborthook)
987 1002 self._transref = weakref.ref(tr)
988 1003 return tr
989 1004
990 1005 def _journalfiles(self):
991 1006 return ((self.svfs, 'journal'),
992 1007 (self.vfs, 'journal.dirstate'),
993 1008 (self.vfs, 'journal.branch'),
994 1009 (self.vfs, 'journal.desc'),
995 1010 (self.vfs, 'journal.bookmarks'),
996 1011 (self.svfs, 'journal.phaseroots'))
997 1012
998 1013 def undofiles(self):
999 1014 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1000 1015
1001 1016 def _writejournal(self, desc):
1002 1017 self.vfs.write("journal.dirstate",
1003 1018 self.vfs.tryread("dirstate"))
1004 1019 self.vfs.write("journal.branch",
1005 1020 encoding.fromlocal(self.dirstate.branch()))
1006 1021 self.vfs.write("journal.desc",
1007 1022 "%d\n%s\n" % (len(self), desc))
1008 1023 self.vfs.write("journal.bookmarks",
1009 1024 self.vfs.tryread("bookmarks"))
1010 1025 self.svfs.write("journal.phaseroots",
1011 1026 self.svfs.tryread("phaseroots"))
1012 1027
1013 1028 def recover(self):
1014 1029 lock = self.lock()
1015 1030 try:
1016 1031 if self.svfs.exists("journal"):
1017 1032 self.ui.status(_("rolling back interrupted transaction\n"))
1018 1033 vfsmap = {'': self.svfs,
1019 1034 'plain': self.vfs,}
1020 1035 transaction.rollback(self.svfs, vfsmap, "journal",
1021 1036 self.ui.warn)
1022 1037 self.invalidate()
1023 1038 return True
1024 1039 else:
1025 1040 self.ui.warn(_("no interrupted transaction available\n"))
1026 1041 return False
1027 1042 finally:
1028 1043 lock.release()
1029 1044
1030 1045 def rollback(self, dryrun=False, force=False):
1031 1046 wlock = lock = None
1032 1047 try:
1033 1048 wlock = self.wlock()
1034 1049 lock = self.lock()
1035 1050 if self.svfs.exists("undo"):
1036 1051 return self._rollback(dryrun, force)
1037 1052 else:
1038 1053 self.ui.warn(_("no rollback information available\n"))
1039 1054 return 1
1040 1055 finally:
1041 1056 release(lock, wlock)
1042 1057
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Roll back the last transaction using the 'undo' files.

        ``dryrun`` only prints what would happen; ``force`` skips the
        safety check against rolling back a commit while another revision
        is checked out. Returns 0 on success.
        """
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        # refuse to roll back a commit from under the user unless forced
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore dirstate/branch if the working directory parents
        # actually vanished from the changelog
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            ms = mergemod.mergestate(self)
            ms.reset(self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1112 1127
1113 1128 def invalidatecaches(self):
1114 1129
1115 1130 if '_tagscache' in vars(self):
1116 1131 # can't use delattr on proxy
1117 1132 del self.__dict__['_tagscache']
1118 1133
1119 1134 self.unfiltered()._branchcaches.clear()
1120 1135 self.invalidatevolatilesets()
1121 1136
1122 1137 def invalidatevolatilesets(self):
1123 1138 self.filteredrevcache.clear()
1124 1139 obsolete.clearobscaches(self)
1125 1140
1126 1141 def invalidatedirstate(self):
1127 1142 '''Invalidates the dirstate, causing the next call to dirstate
1128 1143 to check if it was modified since the last time it was read,
1129 1144 rereading it if it has.
1130 1145
1131 1146 This is different to dirstate.invalidate() that it doesn't always
1132 1147 rereads the dirstate. Use dirstate.invalidate() if you want to
1133 1148 explicitly read the dirstate again (i.e. restoring it to a previous
1134 1149 known good state).'''
1135 1150 if hasunfilteredcache(self, 'dirstate'):
1136 1151 for k in self.dirstate._filecache:
1137 1152 try:
1138 1153 delattr(self.dirstate, k)
1139 1154 except AttributeError:
1140 1155 pass
1141 1156 delattr(self.unfiltered(), 'dirstate')
1142 1157
1143 1158 def invalidate(self):
1144 1159 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1145 1160 for k in self._filecache:
1146 1161 # dirstate is invalidated separately in invalidatedirstate()
1147 1162 if k == 'dirstate':
1148 1163 continue
1149 1164
1150 1165 try:
1151 1166 delattr(unfiltered, k)
1152 1167 except AttributeError:
1153 1168 pass
1154 1169 self.invalidatecaches()
1155 1170 self.store.invalidatecaches()
1156 1171
1157 1172 def invalidateall(self):
1158 1173 '''Fully invalidates both store and non-store parts, causing the
1159 1174 subsequent operation to reread any outside changes.'''
1160 1175 # extension should hook this to invalidate its caches
1161 1176 self.invalidate()
1162 1177 self.invalidatedirstate()
1163 1178
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire lock file ``lockname`` on ``vfs`` and return the lock.

        ``releasefn`` runs on release, ``acquirefn`` right after a
        successful acquisition. When the lock is held and ``wait`` is
        true, retry with a (ui-configurable) 600 second timeout after
        warning the user; otherwise re-raise LockHeld.
        """
        try:
            # first attempt is non-blocking (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1180 1195
1181 1196 def _afterlock(self, callback):
1182 1197 """add a callback to the current repository lock.
1183 1198
1184 1199 The callback will be executed on lock release."""
1185 1200 l = self._lockref and self._lockref()
1186 1201 if l:
1187 1202 l.postrelease.append(callback)
1188 1203 else:
1189 1204 callback()
1190 1205
1191 1206 def lock(self, wait=True):
1192 1207 '''Lock the repository store (.hg/store) and return a weak reference
1193 1208 to the lock. Use this before modifying the store (e.g. committing or
1194 1209 stripping). If you are opening a transaction, get a lock as well.)
1195 1210
1196 1211 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1197 1212 'wlock' first to avoid a dead-lock hazard.'''
1198 1213 l = self._lockref and self._lockref()
1199 1214 if l is not None and l.held:
1200 1215 l.lock()
1201 1216 return l
1202 1217
1203 1218 def unlock():
1204 1219 for k, ce in self._filecache.items():
1205 1220 if k == 'dirstate' or k not in self.__dict__:
1206 1221 continue
1207 1222 ce.refresh()
1208 1223
1209 1224 l = self._lock(self.svfs, "lock", wait, unlock,
1210 1225 self.invalidate, _('repository %s') % self.origroot)
1211 1226 self._lockref = weakref.ref(l)
1212 1227 return l
1213 1228
1214 1229 def wlock(self, wait=True):
1215 1230 '''Lock the non-store parts of the repository (everything under
1216 1231 .hg except .hg/store) and return a weak reference to the lock.
1217 1232
1218 1233 Use this before modifying files in .hg.
1219 1234
1220 1235 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1221 1236 'wlock' first to avoid a dead-lock hazard.'''
1222 1237 l = self._wlockref and self._wlockref()
1223 1238 if l is not None and l.held:
1224 1239 l.lock()
1225 1240 return l
1226 1241
1227 1242 # We do not need to check for non-waiting lock aquisition. Such
1228 1243 # acquisition would not cause dead-lock as they would just fail.
1229 1244 if wait and (self.ui.configbool('devel', 'all')
1230 1245 or self.ui.configbool('devel', 'check-locks')):
1231 1246 l = self._lockref and self._lockref()
1232 1247 if l is not None and l.held:
1233 1248 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1234 1249
1235 1250 def unlock():
1236 1251 if self.dirstate.pendingparentchange():
1237 1252 self.dirstate.invalidate()
1238 1253 else:
1239 1254 self.dirstate.write()
1240 1255
1241 1256 self._filecache['dirstate'].refresh()
1242 1257
1243 1258 l = self._lock(self.vfs, "wlock", wait, unlock,
1244 1259 self.invalidatedirstate, _('working directory of %s') %
1245 1260 self.origroot)
1246 1261 self._wlockref = weakref.ref(l)
1247 1262 return l
1248 1263
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        ``manifest1``/``manifest2`` are the manifests of the parents and
        ``linkrev`` the changelog revision being created. The filename is
        appended to ``changelist`` if the file content (or, on a merge,
        only its flags) changed. Returns the filelog node to record in
        the new manifest.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the context already carries a filelog node; reuse it when it
            # matches one of the parents instead of storing a new revision
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1334 1349
1335 1350 @unfilteredmethod
1336 1351 def commit(self, text="", user=None, date=None, match=None, force=False,
1337 1352 editor=False, extra={}):
1338 1353 """Add a new revision to current repository.
1339 1354
1340 1355 Revision information is gathered from the working directory,
1341 1356 match can be used to filter the committed files. If editor is
1342 1357 supplied, it is called to get a commit message.
1343 1358 """
1344 1359
1345 1360 def fail(f, msg):
1346 1361 raise util.Abort('%s: %s' % (f, msg))
1347 1362
1348 1363 if not match:
1349 1364 match = matchmod.always(self.root, '')
1350 1365
1351 1366 if not force:
1352 1367 vdirs = []
1353 1368 match.explicitdir = vdirs.append
1354 1369 match.bad = fail
1355 1370
1356 1371 wlock = self.wlock()
1357 1372 try:
1358 1373 wctx = self[None]
1359 1374 merge = len(wctx.parents()) > 1
1360 1375
1361 1376 if not force and merge and not match.always():
1362 1377 raise util.Abort(_('cannot partially commit a merge '
1363 1378 '(do not specify files or patterns)'))
1364 1379
1365 1380 status = self.status(match=match, clean=force)
1366 1381 if force:
1367 1382 status.modified.extend(status.clean) # mq may commit clean files
1368 1383
1369 1384 # check subrepos
1370 1385 subs = []
1371 1386 commitsubs = set()
1372 1387 newstate = wctx.substate.copy()
1373 1388 # only manage subrepos and .hgsubstate if .hgsub is present
1374 1389 if '.hgsub' in wctx:
1375 1390 # we'll decide whether to track this ourselves, thanks
1376 1391 for c in status.modified, status.added, status.removed:
1377 1392 if '.hgsubstate' in c:
1378 1393 c.remove('.hgsubstate')
1379 1394
1380 1395 # compare current state to last committed state
1381 1396 # build new substate based on last committed state
1382 1397 oldstate = wctx.p1().substate
1383 1398 for s in sorted(newstate.keys()):
1384 1399 if not match(s):
1385 1400 # ignore working copy, use old state if present
1386 1401 if s in oldstate:
1387 1402 newstate[s] = oldstate[s]
1388 1403 continue
1389 1404 if not force:
1390 1405 raise util.Abort(
1391 1406 _("commit with new subrepo %s excluded") % s)
1392 1407 dirtyreason = wctx.sub(s).dirtyreason(True)
1393 1408 if dirtyreason:
1394 1409 if not self.ui.configbool('ui', 'commitsubrepos'):
1395 1410 raise util.Abort(dirtyreason,
1396 1411 hint=_("use --subrepos for recursive commit"))
1397 1412 subs.append(s)
1398 1413 commitsubs.add(s)
1399 1414 else:
1400 1415 bs = wctx.sub(s).basestate()
1401 1416 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1402 1417 if oldstate.get(s, (None, None, None))[1] != bs:
1403 1418 subs.append(s)
1404 1419
1405 1420 # check for removed subrepos
1406 1421 for p in wctx.parents():
1407 1422 r = [s for s in p.substate if s not in newstate]
1408 1423 subs += [s for s in r if match(s)]
1409 1424 if subs:
1410 1425 if (not match('.hgsub') and
1411 1426 '.hgsub' in (wctx.modified() + wctx.added())):
1412 1427 raise util.Abort(
1413 1428 _("can't commit subrepos without .hgsub"))
1414 1429 status.modified.insert(0, '.hgsubstate')
1415 1430
1416 1431 elif '.hgsub' in status.removed:
1417 1432 # clean up .hgsubstate when .hgsub is removed
1418 1433 if ('.hgsubstate' in wctx and
1419 1434 '.hgsubstate' not in (status.modified + status.added +
1420 1435 status.removed)):
1421 1436 status.removed.insert(0, '.hgsubstate')
1422 1437
1423 1438 # make sure all explicit patterns are matched
1424 1439 if not force and match.files():
1425 1440 matched = set(status.modified + status.added + status.removed)
1426 1441
1427 1442 for f in match.files():
1428 1443 f = self.dirstate.normalize(f)
1429 1444 if f == '.' or f in matched or f in wctx.substate:
1430 1445 continue
1431 1446 if f in status.deleted:
1432 1447 fail(f, _('file not found!'))
1433 1448 if f in vdirs: # visited directory
1434 1449 d = f + '/'
1435 1450 for mf in matched:
1436 1451 if mf.startswith(d):
1437 1452 break
1438 1453 else:
1439 1454 fail(f, _("no match under directory!"))
1440 1455 elif f not in self.dirstate:
1441 1456 fail(f, _("file not tracked!"))
1442 1457
1443 1458 cctx = context.workingcommitctx(self, status,
1444 1459 text, user, date, extra)
1445 1460
1446 1461 if (not force and not extra.get("close") and not merge
1447 1462 and not cctx.files()
1448 1463 and wctx.branch() == wctx.p1().branch()):
1449 1464 return None
1450 1465
1451 1466 if merge and cctx.deleted():
1452 1467 raise util.Abort(_("cannot commit merge with missing files"))
1453 1468
1454 1469 ms = mergemod.mergestate(self)
1455 1470 for f in status.modified:
1456 1471 if f in ms and ms[f] == 'u':
1457 1472 raise util.Abort(_('unresolved merge conflicts '
1458 1473 '(see "hg help resolve")'))
1459 1474
1460 1475 if editor:
1461 1476 cctx._text = editor(self, cctx, subs)
1462 1477 edited = (text != cctx._text)
1463 1478
1464 1479 # Save commit message in case this transaction gets rolled back
1465 1480 # (e.g. by a pretxncommit hook). Leave the content alone on
1466 1481 # the assumption that the user will use the same editor again.
1467 1482 msgfn = self.savecommitmessage(cctx._text)
1468 1483
1469 1484 # commit subs and write new state
1470 1485 if subs:
1471 1486 for s in sorted(commitsubs):
1472 1487 sub = wctx.sub(s)
1473 1488 self.ui.status(_('committing subrepository %s\n') %
1474 1489 subrepo.subrelpath(sub))
1475 1490 sr = sub.commit(cctx._text, user, date)
1476 1491 newstate[s] = (newstate[s][0], sr)
1477 1492 subrepo.writestate(self, newstate)
1478 1493
1479 1494 p1, p2 = self.dirstate.parents()
1480 1495 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1481 1496 try:
1482 1497 self.hook("precommit", throw=True, parent1=hookp1,
1483 1498 parent2=hookp2)
1484 1499 ret = self.commitctx(cctx, True)
1485 1500 except: # re-raises
1486 1501 if edited:
1487 1502 self.ui.write(
1488 1503 _('note: commit message saved in %s\n') % msgfn)
1489 1504 raise
1490 1505
1491 1506 # update bookmarks, dirstate and mergestate
1492 1507 bookmarks.update(self, [p1, p2], ret)
1493 1508 cctx.markcommitted(ret)
1494 1509 ms.reset()
1495 1510 finally:
1496 1511 wlock.release()
1497 1512
1498 1513 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1499 1514 # hack for command that use a temporary commit (eg: histedit)
1500 1515 # temporary commit got stripped before hook release
1501 1516 if node in self:
1502 1517 self.hook("commit", node=node, parent1=parent1,
1503 1518 parent2=parent2)
1504 1519 self._afterlock(commithook)
1505 1520 return ret
1506 1521
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When ``error`` is true, per-file IOErrors are fatal instead of
        being tolerated for missing files (errno.ENOENT). Returns the new
        changelog node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file-level changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1593 1608
1594 1609 @unfilteredmethod
1595 1610 def destroying(self):
1596 1611 '''Inform the repository that nodes are about to be destroyed.
1597 1612 Intended for use by strip and rollback, so there's a common
1598 1613 place for anything that has to be done before destroying history.
1599 1614
1600 1615 This is mostly useful for saving state that is in memory and waiting
1601 1616 to be flushed when the current lock is released. Because a call to
1602 1617 destroyed is imminent, the repo will be invalidated causing those
1603 1618 changes to stay in memory (waiting for the next unlock), or vanish
1604 1619 completely.
1605 1620 '''
1606 1621 # When using the same lock to commit and strip, the phasecache is left
1607 1622 # dirty after committing. Then when we strip, the repo is invalidated,
1608 1623 # causing those changes to disappear.
1609 1624 if '_phasecache' in vars(self):
1610 1625 self._phasecache.write()
1611 1626
1612 1627 @unfilteredmethod
1613 1628 def destroyed(self):
1614 1629 '''Inform the repository that nodes have been destroyed.
1615 1630 Intended for use by strip and rollback, so there's a common
1616 1631 place for anything that has to be done after destroying history.
1617 1632 '''
1618 1633 # When one tries to:
1619 1634 # 1) destroy nodes thus calling this method (e.g. strip)
1620 1635 # 2) use phasecache somewhere (e.g. commit)
1621 1636 #
1622 1637 # then 2) will fail because the phasecache contains nodes that were
1623 1638 # removed. We can either remove phasecache from the filecache,
1624 1639 # causing it to reload next time it is accessed, or simply filter
1625 1640 # the removed nodes now and write the updated cache.
1626 1641 self._phasecache.filterunknown(self)
1627 1642 self._phasecache.write()
1628 1643
1629 1644 # update the 'served' branch cache to help read only server process
1630 1645 # Thanks to branchcache collaboration this is done from the nearest
1631 1646 # filtered subset and it is expected to be fast.
1632 1647 branchmap.updatecache(self.filtered('served'))
1633 1648
1634 1649 # Ensure the persistent tag cache is updated. Doing it now
1635 1650 # means that the tag cache only has to worry about destroyed
1636 1651 # heads immediately after a strip/rollback. That in turn
1637 1652 # guarantees that "cachetip == currenttip" (comparing both rev
1638 1653 # and node) always means no nodes have been added or destroyed.
1639 1654
1640 1655 # XXX this is suboptimal when qrefresh'ing: we strip the current
1641 1656 # head, refresh the tag cache, then immediately add a new head.
1642 1657 # But I think doing it this way is necessary for the "instant
1643 1658 # tag cache retrieval" case to work.
1644 1659 self.invalidate()
1645 1660
1646 1661 def walk(self, match, node=None):
1647 1662 '''
1648 1663 walk recursively through the directory tree or a given
1649 1664 changeset, finding all files matched by the match
1650 1665 function
1651 1666 '''
1652 1667 return self[node].walk(match)
1653 1668
1654 1669 def status(self, node1='.', node2=None, match=None,
1655 1670 ignored=False, clean=False, unknown=False,
1656 1671 listsubrepos=False):
1657 1672 '''a convenience method that calls node1.status(node2)'''
1658 1673 return self[node1].status(node2, match, ignored, clean, unknown,
1659 1674 listsubrepos)
1660 1675
1661 1676 def heads(self, start=None):
1662 1677 heads = self.changelog.heads(start)
1663 1678 # sort the output in rev descending order
1664 1679 return sorted(heads, key=self.changelog.rev, reverse=True)
1665 1680
1666 1681 def branchheads(self, branch=None, start=None, closed=False):
1667 1682 '''return a (possibly filtered) list of heads for the given branch
1668 1683
1669 1684 Heads are returned in topological order, from newest to oldest.
1670 1685 If branch is None, use the dirstate branch.
1671 1686 If start is not None, return only heads reachable from start.
1672 1687 If closed is True, return heads that are marked as closed as well.
1673 1688 '''
1674 1689 if branch is None:
1675 1690 branch = self[None].branch()
1676 1691 branches = self.branchmap()
1677 1692 if branch not in branches:
1678 1693 return []
1679 1694 # the cache returns heads ordered lowest to highest
1680 1695 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1681 1696 if start is not None:
1682 1697 # filter out the heads that cannot be reached from startrev
1683 1698 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1684 1699 bheads = [h for h in bheads if h in fbheads]
1685 1700 return bheads
1686 1701
1687 1702 def branches(self, nodes):
1688 1703 if not nodes:
1689 1704 nodes = [self.changelog.tip()]
1690 1705 b = []
1691 1706 for n in nodes:
1692 1707 t = n
1693 1708 while True:
1694 1709 p = self.changelog.parents(n)
1695 1710 if p[1] != nullid or p[0] == nullid:
1696 1711 b.append((t, n, p[0], p[1]))
1697 1712 break
1698 1713 n = p[0]
1699 1714 return b
1700 1715
1701 1716 def between(self, pairs):
1702 1717 r = []
1703 1718
1704 1719 for top, bottom in pairs:
1705 1720 n, l, i = top, [], 0
1706 1721 f = 1
1707 1722
1708 1723 while n != bottom and n != nullid:
1709 1724 p = self.changelog.parents(n)[0]
1710 1725 if i == f:
1711 1726 l.append(n)
1712 1727 f = f * 2
1713 1728 n = p
1714 1729 i += 1
1715 1730
1716 1731 r.append(l)
1717 1732
1718 1733 return r
1719 1734
1720 1735 def checkpush(self, pushop):
1721 1736 """Extensions can override this function if additional checks have
1722 1737 to be performed before pushing, or call it if they override push
1723 1738 command.
1724 1739 """
1725 1740 pass
1726 1741
1727 1742 @unfilteredpropertycache
1728 1743 def prepushoutgoinghooks(self):
1729 1744 """Return util.hooks consists of "(repo, remote, outgoing)"
1730 1745 functions, which are called before pushing changesets.
1731 1746 """
1732 1747 return util.hooks()
1733 1748
1734 1749 def stream_in(self, remote, requirements):
1735 1750 lock = self.lock()
1736 1751 try:
1737 1752 # Save remote branchmap. We will use it later
1738 1753 # to speed up branchcache creation
1739 1754 rbranchmap = None
1740 1755 if remote.capable("branchmap"):
1741 1756 rbranchmap = remote.branchmap()
1742 1757
1743 1758 fp = remote.stream_out()
1744 1759 l = fp.readline()
1745 1760 try:
1746 1761 resp = int(l)
1747 1762 except ValueError:
1748 1763 raise error.ResponseError(
1749 1764 _('unexpected response from remote server:'), l)
1750 1765 if resp == 1:
1751 1766 raise util.Abort(_('operation forbidden by server'))
1752 1767 elif resp == 2:
1753 1768 raise util.Abort(_('locking the remote repository failed'))
1754 1769 elif resp != 0:
1755 1770 raise util.Abort(_('the server sent an unknown error code'))
1756 1771 self.ui.status(_('streaming all changes\n'))
1757 1772 l = fp.readline()
1758 1773 try:
1759 1774 total_files, total_bytes = map(int, l.split(' ', 1))
1760 1775 except (ValueError, TypeError):
1761 1776 raise error.ResponseError(
1762 1777 _('unexpected response from remote server:'), l)
1763 1778 self.ui.status(_('%d files to transfer, %s of data\n') %
1764 1779 (total_files, util.bytecount(total_bytes)))
1765 1780 handled_bytes = 0
1766 1781 self.ui.progress(_('clone'), 0, total=total_bytes)
1767 1782 start = time.time()
1768 1783
1769 1784 tr = self.transaction(_('clone'))
1770 1785 try:
1771 1786 for i in xrange(total_files):
1772 1787 # XXX doesn't support '\n' or '\r' in filenames
1773 1788 l = fp.readline()
1774 1789 try:
1775 1790 name, size = l.split('\0', 1)
1776 1791 size = int(size)
1777 1792 except (ValueError, TypeError):
1778 1793 raise error.ResponseError(
1779 1794 _('unexpected response from remote server:'), l)
1780 1795 if self.ui.debugflag:
1781 1796 self.ui.debug('adding %s (%s)\n' %
1782 1797 (name, util.bytecount(size)))
1783 1798 # for backwards compat, name was partially encoded
1784 1799 ofp = self.svfs(store.decodedir(name), 'w')
1785 1800 for chunk in util.filechunkiter(fp, limit=size):
1786 1801 handled_bytes += len(chunk)
1787 1802 self.ui.progress(_('clone'), handled_bytes,
1788 1803 total=total_bytes)
1789 1804 ofp.write(chunk)
1790 1805 ofp.close()
1791 1806 tr.close()
1792 1807 finally:
1793 1808 tr.release()
1794 1809
1795 1810 # Writing straight to files circumvented the inmemory caches
1796 1811 self.invalidate()
1797 1812
1798 1813 elapsed = time.time() - start
1799 1814 if elapsed <= 0:
1800 1815 elapsed = 0.001
1801 1816 self.ui.progress(_('clone'), None)
1802 1817 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1803 1818 (util.bytecount(total_bytes), elapsed,
1804 1819 util.bytecount(total_bytes / elapsed)))
1805 1820
1806 1821 # new requirements = old non-format requirements +
1807 1822 # new format-related
1808 1823 # requirements from the streamed-in repository
1809 1824 requirements.update(set(self.requirements) - self.supportedformats)
1810 1825 self._applyrequirements(requirements)
1811 1826 self._writerequirements()
1812 1827
1813 1828 if rbranchmap:
1814 1829 rbheads = []
1815 1830 closed = []
1816 1831 for bheads in rbranchmap.itervalues():
1817 1832 rbheads.extend(bheads)
1818 1833 for h in bheads:
1819 1834 r = self.changelog.rev(h)
1820 1835 b, c = self.changelog.branchinfo(r)
1821 1836 if c:
1822 1837 closed.append(h)
1823 1838
1824 1839 if rbheads:
1825 1840 rtiprev = max((int(self.changelog.rev(node))
1826 1841 for node in rbheads))
1827 1842 cache = branchmap.branchcache(rbranchmap,
1828 1843 self[rtiprev].node(),
1829 1844 rtiprev,
1830 1845 closednodes=closed)
1831 1846 # Try to stick it as low as possible
1832 1847 # filter above served are unlikely to be fetch from a clone
1833 1848 for candidate in ('base', 'immutable', 'served'):
1834 1849 rview = self.filtered(candidate)
1835 1850 if cache.validfor(rview):
1836 1851 self._branchcaches[candidate] = cache
1837 1852 cache.write(rview)
1838 1853 break
1839 1854 self.invalidate()
1840 1855 return len(self.heads()) + 1
1841 1856 finally:
1842 1857 lock.release()
1843 1858
1844 1859 def clone(self, remote, heads=[], stream=None):
1845 1860 '''clone remote repository.
1846 1861
1847 1862 keyword arguments:
1848 1863 heads: list of revs to clone (forces use of pull)
1849 1864 stream: use streaming clone if possible'''
1850 1865
1851 1866 # now, all clients that can request uncompressed clones can
1852 1867 # read repo formats supported by all servers that can serve
1853 1868 # them.
1854 1869
1855 1870 # if revlog format changes, client will have to check version
1856 1871 # and format flags on "stream" capability, and use
1857 1872 # uncompressed only if compatible.
1858 1873
1859 1874 if stream is None:
1860 1875 # if the server explicitly prefers to stream (for fast LANs)
1861 1876 stream = remote.capable('stream-preferred')
1862 1877
1863 1878 if stream and not heads:
1864 1879 # 'stream' means remote revlog format is revlogv1 only
1865 1880 if remote.capable('stream'):
1866 1881 self.stream_in(remote, set(('revlogv1',)))
1867 1882 else:
1868 1883 # otherwise, 'streamreqs' contains the remote revlog format
1869 1884 streamreqs = remote.capable('streamreqs')
1870 1885 if streamreqs:
1871 1886 streamreqs = set(streamreqs.split(','))
1872 1887 # if we support it, stream in and adjust our requirements
1873 1888 if not streamreqs - self.supportedformats:
1874 1889 self.stream_in(remote, streamreqs)
1875 1890
1876 1891 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1877 1892 try:
1878 1893 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1879 1894 ret = exchange.pull(self, remote, heads).cgresult
1880 1895 finally:
1881 1896 self.ui.restoreconfig(quiet)
1882 1897 return ret
1883 1898
1884 1899 def pushkey(self, namespace, key, old, new):
1885 1900 try:
1886 1901 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1887 1902 old=old, new=new)
1888 1903 except error.HookAbort, exc:
1889 1904 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1890 1905 if exc.hint:
1891 1906 self.ui.write_err(_("(%s)\n") % exc.hint)
1892 1907 return False
1893 1908 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1894 1909 ret = pushkey.push(self, namespace, key, old, new)
1895 1910 def runhook():
1896 1911 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1897 1912 ret=ret)
1898 1913 self._afterlock(runhook)
1899 1914 return ret
1900 1915
1901 1916 def listkeys(self, namespace):
1902 1917 self.hook('prelistkeys', throw=True, namespace=namespace)
1903 1918 self.ui.debug('listing keys for "%s"\n' % namespace)
1904 1919 values = pushkey.list(self, namespace)
1905 1920 self.hook('listkeys', namespace=namespace, values=values)
1906 1921 return values
1907 1922
1908 1923 def debugwireargs(self, one, two, three=None, four=None, five=None):
1909 1924 '''used to test argument passing over the wire'''
1910 1925 return "%s %s %s %s %s" % (one, two, three, four, five)
1911 1926
1912 1927 def savecommitmessage(self, text):
1913 1928 fp = self.vfs('last-message.txt', 'wb')
1914 1929 try:
1915 1930 fp.write(text)
1916 1931 finally:
1917 1932 fp.close()
1918 1933 return self.pathto(fp.name[len(self.root) + 1:])
1919 1934
1920 1935 # used to avoid circular references so destructors work
1921 1936 def aftertrans(files):
1922 1937 renamefiles = [tuple(t) for t in files]
1923 1938 def a():
1924 1939 for vfs, src, dest in renamefiles:
1925 1940 try:
1926 1941 vfs.rename(src, dest)
1927 1942 except OSError: # journal file does not yet exist
1928 1943 pass
1929 1944 return a
1930 1945
1931 1946 def undoname(fn):
1932 1947 base, name = os.path.split(fn)
1933 1948 assert name.startswith('journal')
1934 1949 return os.path.join(base, name.replace('journal', 'undo', 1))
1935 1950
1936 1951 def instance(ui, path, create):
1937 1952 return localrepository(ui, util.urllocalpath(path), create)
1938 1953
1939 1954 def islocal(path):
1940 1955 return True
@@ -1,598 +1,602 b''
1 1 Test exchange of common information using bundle2
2 2
3 3
4 4 $ getmainid() {
5 5 > hg -R main log --template '{node}\n' --rev "$1"
6 6 > }
7 7
8 8 enable obsolescence
9 9
10 10 $ cat > $TESTTMP/bundle2-pushkey-hook.sh << EOF
11 11 > echo pushkey: lock state after \"\$HG_NAMESPACE\"
12 12 > hg debuglock
13 13 > EOF
14 14
15 15 $ cat >> $HGRCPATH << EOF
16 16 > [experimental]
17 17 > evolution=createmarkers,exchange
18 18 > bundle2-exp=True
19 19 > [ui]
20 20 > ssh=python "$TESTDIR/dummyssh"
21 21 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
22 22 > [web]
23 23 > push_ssl = false
24 24 > allow_push = *
25 25 > [phases]
26 26 > publish=False
27 27 > [hooks]
28 28 > pretxnclose.tip = hg log -r tip -T "pre-close-tip:{node|short} {phase} {bookmarks}\n"
29 29 > txnclose.tip = hg log -r tip -T "postclose-tip:{node|short} {phase} {bookmarks}\n"
30 30 > txnclose.env = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" txnclose"
31 31 > pushkey= sh "$TESTTMP/bundle2-pushkey-hook.sh"
32 32 > EOF
33 33
34 34 The extension requires a repo (currently unused)
35 35
36 36 $ hg init main
37 37 $ cd main
38 38 $ touch a
39 39 $ hg add a
40 40 $ hg commit -m 'a'
41 41 pre-close-tip:3903775176ed draft
42 42 postclose-tip:3903775176ed draft
43 43 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
44 44
45 45 $ hg unbundle $TESTDIR/bundles/rebase.hg
46 46 adding changesets
47 47 adding manifests
48 48 adding file changes
49 49 added 8 changesets with 7 changes to 7 files (+3 heads)
50 50 pre-close-tip:02de42196ebe draft
51 51 postclose-tip:02de42196ebe draft
52 52 txnclose hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=unbundle HG_TXNID=TXN:* HG_TXNNAME=unbundle (glob)
53 53 bundle:*/tests/bundles/rebase.hg HG_URL=bundle:*/tests/bundles/rebase.hg (glob)
54 54 (run 'hg heads' to see heads, 'hg merge' to merge)
55 55
56 56 $ cd ..
57 57
58 58 Real world exchange
59 59 =====================
60 60
61 61 Add more obsolescence information
62 62
63 63 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
64 64 pre-close-tip:02de42196ebe draft
65 65 postclose-tip:02de42196ebe draft
66 66 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
67 67 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
68 68 pre-close-tip:02de42196ebe draft
69 69 postclose-tip:02de42196ebe draft
70 70 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
71 71
72 72 clone --pull
73 73
74 74 $ hg -R main phase --public cd010b8cd998
75 75 pre-close-tip:000000000000 public
76 76 postclose-tip:02de42196ebe draft
77 77 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
78 78 $ hg clone main other --pull --rev 9520eea781bc
79 79 adding changesets
80 80 adding manifests
81 81 adding file changes
82 82 added 2 changesets with 2 changes to 2 files
83 83 1 new obsolescence markers
84 84 pre-close-tip:9520eea781bc draft
85 85 postclose-tip:9520eea781bc draft
86 86 txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
87 87 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
88 88 updating to branch default
89 89 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 90 $ hg -R other log -G
91 91 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
92 92 |
93 93 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
94 94
95 95 $ hg -R other debugobsolete
96 96 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
97 97
98 98 pull
99 99
100 100 $ hg -R main phase --public 9520eea781bc
101 101 pre-close-tip:000000000000 public
102 102 postclose-tip:02de42196ebe draft
103 103 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
104 104 $ hg -R other pull -r 24b6387c8c8c
105 105 pulling from $TESTTMP/main (glob)
106 106 searching for changes
107 107 adding changesets
108 108 adding manifests
109 109 adding file changes
110 110 added 1 changesets with 1 changes to 1 files (+1 heads)
111 111 1 new obsolescence markers
112 112 pre-close-tip:24b6387c8c8c draft
113 113 postclose-tip:24b6387c8c8c draft
114 114 txnclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
115 115 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
116 116 (run 'hg heads' to see heads, 'hg merge' to merge)
117 117 $ hg -R other log -G
118 118 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
119 119 |
120 120 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
121 121 |/
122 122 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
123 123
124 124 $ hg -R other debugobsolete
125 125 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
126 126 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
127 127
128 128 pull empty (with phase movement)
129 129
130 130 $ hg -R main phase --public 24b6387c8c8c
131 131 pre-close-tip:000000000000 public
132 132 postclose-tip:02de42196ebe draft
133 133 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
134 134 $ hg -R other pull -r 24b6387c8c8c
135 135 pulling from $TESTTMP/main (glob)
136 136 no changes found
137 137 pre-close-tip:000000000000 public
138 138 postclose-tip:24b6387c8c8c public
139 139 txnclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
140 140 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
141 141 $ hg -R other log -G
142 142 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
143 143 |
144 144 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
145 145 |/
146 146 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
147 147
148 148 $ hg -R other debugobsolete
149 149 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
150 150 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
151 151
152 152 pull empty
153 153
154 154 $ hg -R other pull -r 24b6387c8c8c
155 155 pulling from $TESTTMP/main (glob)
156 156 no changes found
157 157 pre-close-tip:24b6387c8c8c public
158 158 postclose-tip:24b6387c8c8c public
159 159 txnclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
160 160 file:/*/$TESTTMP/main HG_URL=file:$TESTTMP/main (glob)
161 161 $ hg -R other log -G
162 162 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
163 163 |
164 164 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
165 165 |/
166 166 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
167 167
168 168 $ hg -R other debugobsolete
169 169 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
170 170 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
171 171
172 172 add extra data to test their exchange during push
173 173
174 174 $ hg -R main bookmark --rev eea13746799a book_eea1
175 175 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
176 176 pre-close-tip:02de42196ebe draft
177 177 postclose-tip:02de42196ebe draft
178 178 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
179 179 $ hg -R main bookmark --rev 02de42196ebe book_02de
180 180 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
181 181 pre-close-tip:02de42196ebe draft book_02de
182 182 postclose-tip:02de42196ebe draft book_02de
183 183 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
184 184 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
185 185 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
186 186 pre-close-tip:02de42196ebe draft book_02de
187 187 postclose-tip:02de42196ebe draft book_02de
188 188 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
189 189 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
190 190 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
191 191 pre-close-tip:02de42196ebe draft book_02de
192 192 postclose-tip:02de42196ebe draft book_02de
193 193 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
194 194 $ hg -R main bookmark --rev 32af7686d403 book_32af
195 195 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
196 196 pre-close-tip:02de42196ebe draft book_02de
197 197 postclose-tip:02de42196ebe draft book_02de
198 198 txnclose hook: HG_NEW_OBSMARKERS=1 HG_TXNID=TXN:* HG_TXNNAME=debugobsolete (glob)
199 199
200 200 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
201 201 $ hg -R other bookmark --rev cd010b8cd998 book_02de
202 202 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
203 203 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
204 204 $ hg -R other bookmark --rev cd010b8cd998 book_32af
205 205
206 206 $ hg -R main phase --public eea13746799a
207 207 pre-close-tip:000000000000 public
208 208 postclose-tip:02de42196ebe draft book_02de
209 209 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
210 210
211 211 push
212 212 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
213 213 pushing to other
214 214 searching for changes
215 215 pre-close-tip:eea13746799a public book_eea1
216 216 pushkey: lock state after "phases"
217 217 lock: free
218 218 wlock: free
219 219 pushkey: lock state after "bookmarks"
220 220 lock: free
221 221 wlock: free
222 222 postclose-tip:eea13746799a public book_eea1
223 223 txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_TXNID=TXN:* HG_TXNNAME=push HG_URL=push (glob)
224 224 remote: adding changesets
225 225 remote: adding manifests
226 226 remote: adding file changes
227 227 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
228 228 remote: 1 new obsolescence markers
229 229 updating bookmark book_eea1
230 230 pre-close-tip:02de42196ebe draft book_02de
231 231 postclose-tip:02de42196ebe draft book_02de
232 232 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
233 233 file:/*/$TESTTMP/other HG_URL=file:$TESTTMP/other (glob)
234 234 $ hg -R other log -G
235 235 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
236 236 |\
237 237 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
238 238 | |
239 239 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
240 240 |/
241 241 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
242 242
243 243 $ hg -R other debugobsolete
244 244 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
245 245 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
246 246 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
247 247
248 248 pull over ssh
249 249
250 250 $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
251 251 pulling from ssh://user@dummy/main
252 252 searching for changes
253 253 adding changesets
254 254 adding manifests
255 255 adding file changes
256 256 added 1 changesets with 1 changes to 1 files (+1 heads)
257 257 1 new obsolescence markers
258 258 updating bookmark book_02de
259 259 pre-close-tip:02de42196ebe draft book_02de
260 260 postclose-tip:02de42196ebe draft book_02de
261 261 txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
262 262 ssh://user@dummy/main HG_URL=ssh://user@dummy/main
263 263 (run 'hg heads' to see heads, 'hg merge' to merge)
264 264 $ hg -R other debugobsolete
265 265 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
266 266 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
267 267 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
268 268 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
269 269
270 270 pull over http
271 271
272 272 $ hg -R main serve -p $HGPORT -d --pid-file=main.pid -E main-error.log
273 273 $ cat main.pid >> $DAEMON_PIDS
274 274
275 275 $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
276 276 pulling from http://localhost:$HGPORT/
277 277 searching for changes
278 278 adding changesets
279 279 adding manifests
280 280 adding file changes
281 281 added 1 changesets with 1 changes to 1 files (+1 heads)
282 282 1 new obsolescence markers
283 283 updating bookmark book_42cc
284 284 pre-close-tip:42ccdea3bb16 draft book_42cc
285 285 postclose-tip:42ccdea3bb16 draft book_42cc
286 286 txnclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_TXNID=TXN:* HG_TXNNAME=pull (glob)
287 287 http://localhost:$HGPORT/ HG_URL=http://localhost:$HGPORT/
288 288 (run 'hg heads .' to see heads, 'hg merge' to merge)
289 289 $ cat main-error.log
290 290 $ hg -R other debugobsolete
291 291 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
292 292 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
293 293 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
294 294 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
295 295 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
296 296
297 297 push over ssh
298 298
299 299 $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
300 300 pushing to ssh://user@dummy/other
301 301 searching for changes
302 302 remote: adding changesets
303 303 remote: adding manifests
304 304 remote: adding file changes
305 305 remote: added 1 changesets with 1 changes to 1 files
306 306 remote: 1 new obsolescence markers
307 307 updating bookmark book_5fdd
308 308 remote: pre-close-tip:5fddd98957c8 draft book_5fdd
309 309 remote: pushkey: lock state after "bookmarks"
310 310 remote: lock: free
311 311 remote: wlock: free
312 312 remote: postclose-tip:5fddd98957c8 draft book_5fdd
313 313 remote: txnclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_TXNID=TXN:* HG_TXNNAME=serve HG_URL=remote:ssh:127.0.0.1 (glob)
314 314 pre-close-tip:02de42196ebe draft book_02de
315 315 postclose-tip:02de42196ebe draft book_02de
316 316 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
317 317 ssh://user@dummy/other HG_URL=ssh://user@dummy/other
318 318 $ hg -R other log -G
319 319 o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
320 320 |
321 321 o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
322 322 |
323 323 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
324 324 | |
325 325 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
326 326 | |/|
327 327 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
328 328 |/ /
329 329 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
330 330 |/
331 331 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A
332 332
333 333 $ hg -R other debugobsolete
334 334 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
335 335 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
336 336 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
337 337 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
338 338 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
339 339 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
340 340
341 341 push over http
342 342
343 343 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
344 344 $ cat other.pid >> $DAEMON_PIDS
345 345
346 346 $ hg -R main phase --public 32af7686d403
347 347 pre-close-tip:000000000000 public
348 348 postclose-tip:02de42196ebe draft book_02de
349 349 txnclose hook: HG_PHASES_MOVED=1 HG_TXNID=TXN:* HG_TXNNAME=phase (glob)
350 350 $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
351 351 pushing to http://localhost:$HGPORT2/
352 352 searching for changes
353 353 remote: adding changesets
354 354 remote: adding manifests
355 355 remote: adding file changes
356 356 remote: added 1 changesets with 1 changes to 1 files
357 357 remote: 1 new obsolescence markers
358 358 updating bookmark book_32af
359 359 pre-close-tip:02de42196ebe draft book_02de
360 360 postclose-tip:02de42196ebe draft book_02de
361 361 txnclose hook: HG_SOURCE=push-response HG_TXNID=TXN:* HG_TXNNAME=push-response (glob)
362 362 http://localhost:$HGPORT2/ HG_URL=http://localhost:$HGPORT2/
363 363 $ cat other-error.log
364 364
365 365 Check final content.
366 366
367 367 $ hg -R other log -G
368 368 o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
369 369 |
370 370 o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
371 371 |
372 372 o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
373 373 |
374 374 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
375 375 | |
376 376 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
377 377 | |/|
378 378 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
379 379 |/ /
380 380 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
381 381 |/
382 382 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
383 383
384 384 $ hg -R other debugobsolete
385 385 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
386 386 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
387 387 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
388 388 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
389 389 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
390 390 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
391 391 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
392 392
393 393 (check that no 'pending' files remain)
394 394
395 395 $ ls -1 other/.hg/bookmarks*
396 396 other/.hg/bookmarks
397 397 $ ls -1 other/.hg/store/phaseroots*
398 398 other/.hg/store/phaseroots
399 399 $ ls -1 other/.hg/store/00changelog.i*
400 400 other/.hg/store/00changelog.i
401 401
402 402 Error Handling
403 403 ==============
404 404
405 405 Check that errors are properly returned to the client during push.
406 406
407 407 Setting up
408 408
409 409 $ cat > failpush.py << EOF
410 410 > """A small extension that makes push fails when using bundle2
411 411 >
412 412 > used to test error handling in bundle2
413 413 > """
414 414 >
415 415 > from mercurial import util
416 416 > from mercurial import bundle2
417 417 > from mercurial import exchange
418 418 > from mercurial import extensions
419 419 >
420 420 > def _pushbundle2failpart(pushop, bundler):
421 421 > reason = pushop.ui.config('failpush', 'reason', None)
422 422 > part = None
423 423 > if reason == 'abort':
424 424 > bundler.newpart('test:abort')
425 425 > if reason == 'unknown':
426 426 > bundler.newpart('test:unknown')
427 427 > if reason == 'race':
428 428 > # 20 Bytes of crap
429 429 > bundler.newpart('check:heads', data='01234567890123456789')
430 430 >
431 431 > @bundle2.parthandler("test:abort")
432 432 > def handleabort(op, part):
433 433 > raise util.Abort('Abandon ship!', hint="don't panic")
434 434 >
435 435 > def uisetup(ui):
436 436 > exchange.b2partsgenmapping['failpart'] = _pushbundle2failpart
437 437 > exchange.b2partsgenorder.insert(0, 'failpart')
438 438 >
439 439 > EOF
440 440
441 441 $ cd main
442 442 $ hg up tip
443 443 3 files updated, 0 files merged, 1 files removed, 0 files unresolved
444 444 $ echo 'I' > I
445 445 $ hg add I
446 446 $ hg ci -m 'I'
447 447 pre-close-tip:e7ec4e813ba6 draft
448 448 postclose-tip:e7ec4e813ba6 draft
449 449 txnclose hook: HG_TXNID=TXN:* HG_TXNNAME=commit (glob)
450 450 $ hg id
451 451 e7ec4e813ba6 tip
452 452 $ cd ..
453 453
454 454 $ cat << EOF >> $HGRCPATH
455 455 > [extensions]
456 456 > failpush=$TESTTMP/failpush.py
457 457 > EOF
458 458
459 459 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
460 460 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
461 461 $ cat other.pid >> $DAEMON_PIDS
462 462
463 463 Doing the actual push: Abort error
464 464
465 465 $ cat << EOF >> $HGRCPATH
466 466 > [failpush]
467 467 > reason = abort
468 468 > EOF
469 469
470 470 $ hg -R main push other -r e7ec4e813ba6
471 471 pushing to other
472 472 searching for changes
473 473 abort: Abandon ship!
474 474 (don't panic)
475 475 [255]
476 476
477 477 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
478 478 pushing to ssh://user@dummy/other
479 479 searching for changes
480 480 abort: Abandon ship!
481 481 (don't panic)
482 482 [255]
483 483
484 484 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
485 485 pushing to http://localhost:$HGPORT2/
486 486 searching for changes
487 487 abort: Abandon ship!
488 488 (don't panic)
489 489 [255]
490 490
491 491
492 492 Doing the actual push: unknown mandatory parts
493 493
494 494 $ cat << EOF >> $HGRCPATH
495 495 > [failpush]
496 496 > reason = unknown
497 497 > EOF
498 498
499 499 $ hg -R main push other -r e7ec4e813ba6
500 500 pushing to other
501 501 searching for changes
502 502 abort: missing support for test:unknown
503 503 [255]
504 504
505 505 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
506 506 pushing to ssh://user@dummy/other
507 507 searching for changes
508 508 abort: missing support for test:unknown
509 509 [255]
510 510
511 511 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
512 512 pushing to http://localhost:$HGPORT2/
513 513 searching for changes
514 514 abort: missing support for test:unknown
515 515 [255]
516 516
517 517 Doing the actual push: race
518 518
519 519 $ cat << EOF >> $HGRCPATH
520 520 > [failpush]
521 521 > reason = race
522 522 > EOF
523 523
524 524 $ hg -R main push other -r e7ec4e813ba6
525 525 pushing to other
526 526 searching for changes
527 527 abort: push failed:
528 528 'repository changed while pushing - please try again'
529 529 [255]
530 530
531 531 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
532 532 pushing to ssh://user@dummy/other
533 533 searching for changes
534 534 abort: push failed:
535 535 'repository changed while pushing - please try again'
536 536 [255]
537 537
538 538 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
539 539 pushing to http://localhost:$HGPORT2/
540 540 searching for changes
541 541 abort: push failed:
542 542 'repository changed while pushing - please try again'
543 543 [255]
544 544
545 545 Doing the actual push: hook abort
546 546
547 547 $ cat << EOF >> $HGRCPATH
548 548 > [failpush]
549 549 > reason =
550 550 > [hooks]
551 551 > pretxnclose.failpush = false
552 552 > EOF
553 553
554 554 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
555 555 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
556 556 $ cat other.pid >> $DAEMON_PIDS
557 557
558 558 $ hg -R main push other -r e7ec4e813ba6
559 559 pushing to other
560 560 searching for changes
561 561 pre-close-tip:e7ec4e813ba6 draft
562 562 transaction abort!
563 563 rollback completed
564 remote: adding changesets
565 remote: adding manifests
566 remote: adding file changes
567 remote: added 1 changesets with 1 changes to 1 files
564 568 abort: pretxnclose.failpush hook exited with status 1
565 569 [255]
566 570
567 571 $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
568 572 pushing to ssh://user@dummy/other
569 573 searching for changes
570 574 remote: adding changesets
571 575 remote: adding manifests
572 576 remote: adding file changes
573 577 remote: added 1 changesets with 1 changes to 1 files
574 578 abort: pretxnclose.failpush hook exited with status 1
575 579 remote: pre-close-tip:e7ec4e813ba6 draft
576 580 remote: transaction abort!
577 581 remote: rollback completed
578 582 [255]
579 583
580 584 $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
581 585 pushing to http://localhost:$HGPORT2/
582 586 searching for changes
583 587 remote: adding changesets
584 588 remote: adding manifests
585 589 remote: adding file changes
586 590 remote: added 1 changesets with 1 changes to 1 files
587 591 abort: pretxnclose.failpush hook exited with status 1
588 592 [255]
589 593
590 594 (check that no 'pending' files remain)
591 595
592 596 $ ls -1 other/.hg/bookmarks*
593 597 other/.hg/bookmarks
594 598 $ ls -1 other/.hg/store/phaseroots*
595 599 other/.hg/store/phaseroots
596 600 $ ls -1 other/.hg/store/00changelog.i*
597 601 other/.hg/store/00changelog.i
598 602
General Comments 0
You need to be logged in to leave comments. Login now