clfilter: do not use tags cache if there are filtered changesets...
Pierre-Yves David
r17715:21c50348 default
@@ -1,2616 +1,2620 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 49 def canpush(self):
50 50 return True
51 51
52 52 def url(self):
53 53 return self._repo.url()
54 54
55 55 def lookup(self, key):
56 56 return self._repo.lookup(key)
57 57
58 58 def branchmap(self):
59 59 return discovery.visiblebranchmap(self._repo)
60 60
61 61 def heads(self):
62 62 return discovery.visibleheads(self._repo)
63 63
64 64 def known(self, nodes):
65 65 return self._repo.known(nodes)
66 66
67 67 def getbundle(self, source, heads=None, common=None):
68 68 return self._repo.getbundle(source, heads=heads, common=common)
69 69
70 70 # TODO We might want to move the next two calls into legacypeer and add
71 71 # unbundle instead.
72 72
73 73 def lock(self):
74 74 return self._repo.lock()
75 75
76 76 def addchangegroup(self, cg, source, url):
77 77 return self._repo.addchangegroup(cg, source, url)
78 78
79 79 def pushkey(self, namespace, key, old, new):
80 80 return self._repo.pushkey(namespace, key, old, new)
81 81
82 82 def listkeys(self, namespace):
83 83 return self._repo.listkeys(namespace)
84 84
85 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 86 '''used to test argument passing over the wire'''
87 87 return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
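A localpeer is normally obtained via repo.peer() (defined further down); a
minimal sketch of driving it, assuming a local repository at a hypothetical
path and a non-secret tip:

    from mercurial import hg, ui
    repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical path
    peer = repo.peer()                   # a localpeer; never cached
    node = peer.lookup('tip')            # delegates to repo.lookup()
    assert peer.known([node]) == [True]  # secret changesets report False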
89 89 class locallegacypeer(localpeer):
90 90 '''peer extension which implements legacy methods too; used for tests with
91 91 restricted capabilities'''
92 92
93 93 def __init__(self, repo):
94 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95 95
96 96 def branches(self, nodes):
97 97 return self._repo.branches(nodes)
98 98
99 99 def between(self, pairs):
100 100 return self._repo.between(pairs)
101 101
102 102 def changegroup(self, basenodes, source):
103 103 return self._repo.changegroup(basenodes, source)
104 104
105 105 def changegroupsubset(self, bases, heads, source):
106 106 return self._repo.changegroupsubset(bases, heads, source)
107 107
108 108 class localrepository(object):
109 109
110 110 supportedformats = set(('revlogv1', 'generaldelta'))
111 111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 112 'dotencode'))
113 113 openerreqs = set(('revlogv1', 'generaldelta'))
114 114 requirements = ['revlogv1']
115 115
116 116 def _baserequirements(self, create):
117 117 return self.requirements[:]
118 118
119 119 def __init__(self, baseui, path=None, create=False):
120 120 self.wvfs = scmutil.vfs(path, expand=True)
121 121 self.wopener = self.wvfs
122 122 self.root = self.wvfs.base
123 123 self.path = self.wvfs.join(".hg")
124 124 self.origroot = path
125 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 126 self.vfs = scmutil.vfs(self.path)
127 127 self.opener = self.vfs
128 128 self.baseui = baseui
129 129 self.ui = baseui.copy()
130 130 # A list of callbacks to shape the phase if no data were found.
131 131 # Callbacks are in the form: func(repo, roots) --> processed root.
132 132 # This list is to be filled by extensions during repo setup.
133 133 self._phasedefaults = []
134 134 try:
135 135 self.ui.readconfig(self.join("hgrc"), self.root)
136 136 extensions.loadall(self.ui)
137 137 except IOError:
138 138 pass
139 139
140 140 if not self.vfs.isdir():
141 141 if create:
142 142 if not self.wvfs.exists():
143 143 self.wvfs.makedirs()
144 144 self.vfs.makedir(notindexed=True)
145 145 requirements = self._baserequirements(create)
146 146 if self.ui.configbool('format', 'usestore', True):
147 147 self.vfs.mkdir("store")
148 148 requirements.append("store")
149 149 if self.ui.configbool('format', 'usefncache', True):
150 150 requirements.append("fncache")
151 151 if self.ui.configbool('format', 'dotencode', True):
152 152 requirements.append('dotencode')
153 153 # create an invalid changelog
154 154 self.vfs.append(
155 155 "00changelog.i",
156 156 '\0\0\0\2' # represents revlogv2
157 157 ' dummy changelog to prevent using the old repo layout'
158 158 )
159 159 if self.ui.configbool('format', 'generaldelta', False):
160 160 requirements.append("generaldelta")
161 161 requirements = set(requirements)
162 162 else:
163 163 raise error.RepoError(_("repository %s not found") % path)
164 164 elif create:
165 165 raise error.RepoError(_("repository %s already exists") % path)
166 166 else:
167 167 try:
168 168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 169 except IOError, inst:
170 170 if inst.errno != errno.ENOENT:
171 171 raise
172 172 requirements = set()
173 173
174 174 self.sharedpath = self.path
175 175 try:
176 176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 177 if not os.path.exists(s):
178 178 raise error.RepoError(
179 179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 180 self.sharedpath = s
181 181 except IOError, inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184
185 185 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
186 186 self.spath = self.store.path
187 187 self.svfs = self.store.vfs
188 188 self.sopener = self.svfs
189 189 self.sjoin = self.store.join
190 190 self.vfs.createmode = self.store.createmode
191 191 self._applyrequirements(requirements)
192 192 if create:
193 193 self._writerequirements()
194 194
195 195
196 196 self._branchcache = None
197 197 self._branchcachetip = None
198 198 self.filterpats = {}
199 199 self._datafilters = {}
200 200 self._transref = self._lockref = self._wlockref = None
201 201
202 202 # A cache for various files under .hg/ that tracks file changes,
203 203 # (used by the filecache decorator)
204 204 #
205 205 # Maps a property name to its util.filecacheentry
206 206 self._filecache = {}
207 207
208 208 def close(self):
209 209 pass
210 210
211 211 def _restrictcapabilities(self, caps):
212 212 return caps
213 213
214 214 def _applyrequirements(self, requirements):
215 215 self.requirements = requirements
216 216 self.sopener.options = dict((r, 1) for r in requirements
217 217 if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
224 224
225 225 def _checknested(self, path):
226 226 """Determine if path is a legal nested repository."""
227 227 if not path.startswith(self.root):
228 228 return False
229 229 subpath = path[len(self.root) + 1:]
230 230 normsubpath = util.pconvert(subpath)
231 231
232 232 # XXX: Checking against the current working copy is wrong in
233 233 # the sense that it can reject things like
234 234 #
235 235 # $ hg cat -r 10 sub/x.txt
236 236 #
237 237 # if sub/ is no longer a subrepository in the working copy
238 238 # parent revision.
239 239 #
240 240 # However, it can of course also allow things that would have
241 241 # been rejected before, such as the above cat command if sub/
242 242 # is a subrepository now, but was a normal directory before.
243 243 # The old path auditor would have rejected by mistake since it
244 244 # panics when it sees sub/.hg/.
245 245 #
246 246 # All in all, checking against the working copy seems sensible
247 247 # since we want to prevent access to nested repositories on
248 248 # the filesystem *now*.
249 249 ctx = self[None]
250 250 parts = util.splitpath(subpath)
251 251 while parts:
252 252 prefix = '/'.join(parts)
253 253 if prefix in ctx.substate:
254 254 if prefix == normsubpath:
255 255 return True
256 256 else:
257 257 sub = ctx.sub(prefix)
258 258 return sub.checknested(subpath[len(prefix) + 1:])
259 259 else:
260 260 parts.pop()
261 261 return False
262 262
263 263 def peer(self):
264 264 return localpeer(self) # not cached to avoid reference cycle
265 265
266 266 @filecache('bookmarks')
267 267 def _bookmarks(self):
268 268 return bookmarks.read(self)
269 269
270 270 @filecache('bookmarks.current')
271 271 def _bookmarkcurrent(self):
272 272 return bookmarks.readcurrent(self)
273 273
274 274 def _writebookmarks(self, marks):
275 275 bookmarks.write(self)
276 276
277 277 def bookmarkheads(self, bookmark):
278 278 name = bookmark.split('@', 1)[0]
279 279 heads = []
280 280 for mark, n in self._bookmarks.iteritems():
281 281 if mark.split('@', 1)[0] == name:
282 282 heads.append(n)
283 283 return heads
284 284
285 285 @storecache('phaseroots')
286 286 def _phasecache(self):
287 287 return phases.phasecache(self, self._phasedefaults)
288 288
289 289 @storecache('obsstore')
290 290 def obsstore(self):
291 291 store = obsolete.obsstore(self.sopener)
292 292 if store and not obsolete._enabled:
293 293 # message is rare enough to not be translated
294 294 msg = 'obsolete feature not enabled but %i markers found!\n'
295 295 self.ui.warn(msg % len(list(store)))
296 296 return store
297 297
298 298 @propertycache
299 299 def hiddenrevs(self):
300 300 """hiddenrevs: revs that should be hidden by commands and tools
301 301
302 302 This set is carried on the repo to ease initialization and lazy
303 303 loading; it'll probably move back to changelog for efficiency and
304 304 consistency reasons.
305 305
306 306 Note that hiddenrevs will need invalidation when
307 307 - a new changeset is added (possibly unstable above extinct)
308 308 - a new obsolete marker is added (possibly a new extinct changeset)
309 309
310 310 hidden changesets cannot have non-hidden descendants
311 311 """
312 312 hidden = set()
313 313 if self.obsstore:
314 314 ### hide extinct changesets that are not accessible by any means
315 315 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
316 316 hidden.update(self.revs(hiddenquery))
317 317 return hidden
318 318
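The same query can be issued through the public revs() helper defined below;
a sketch (the set is only non-empty when the repo carries obsolescence
markers):

    hidden = set(repo.revs('extinct() - ::(. + bookmark() + tagged())'))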
319 319 @storecache('00changelog.i')
320 320 def changelog(self):
321 321 c = changelog.changelog(self.sopener)
322 322 if 'HG_PENDING' in os.environ:
323 323 p = os.environ['HG_PENDING']
324 324 if p.startswith(self.root):
325 325 c.readpending('00changelog.i.a')
326 326 return c
327 327
328 328 @storecache('00manifest.i')
329 329 def manifest(self):
330 330 return manifest.manifest(self.sopener)
331 331
332 332 @filecache('dirstate')
333 333 def dirstate(self):
334 334 warned = [0]
335 335 def validate(node):
336 336 try:
337 337 self.changelog.rev(node)
338 338 return node
339 339 except error.LookupError:
340 340 if not warned[0]:
341 341 warned[0] = True
342 342 self.ui.warn(_("warning: ignoring unknown"
343 343 " working parent %s!\n") % short(node))
344 344 return nullid
345 345
346 346 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
347 347
348 348 def __getitem__(self, changeid):
349 349 if changeid is None:
350 350 return context.workingctx(self)
351 351 return context.changectx(self, changeid)
352 352
353 353 def __contains__(self, changeid):
354 354 try:
355 355 return bool(self.lookup(changeid))
356 356 except error.RepoLookupError:
357 357 return False
358 358
359 359 def __nonzero__(self):
360 360 return True
361 361
362 362 def __len__(self):
363 363 return len(self.changelog)
364 364
365 365 def __iter__(self):
366 366 return iter(self.changelog)
367 367
368 368 def revs(self, expr, *args):
369 369 '''Return a list of revisions matching the given revset'''
370 370 expr = revset.formatspec(expr, *args)
371 371 m = revset.match(None, expr)
372 372 return [r for r in m(self, list(self))]
373 373
374 374 def set(self, expr, *args):
375 375 '''
376 376 Yield a context for each matching revision, after doing arg
377 377 replacement via revset.formatspec
378 378 '''
379 379 for r in self.revs(expr, *args):
380 380 yield self[r]
381 381
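A sketch of typical use of these two helpers; revset.formatspec substitutes
the %s/%d placeholders safely into the expression (branch name hypothetical):

    revs = repo.revs('heads(branch(%s))', 'default')   # revision numbers
    for ctx in repo.set('ancestors(%d)', len(repo) - 1):
        pass  # ... work with each changectx ...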
382 382 def url(self):
383 383 return 'file:' + self.root
384 384
385 385 def hook(self, name, throw=False, **args):
386 386 return hook.hook(self.ui, self, name, throw, **args)
387 387
388 388 tag_disallowed = ':\r\n'
389 389
390 390 def _tag(self, names, node, message, local, user, date, extra={}):
391 391 if isinstance(names, str):
392 392 allchars = names
393 393 names = (names,)
394 394 else:
395 395 allchars = ''.join(names)
396 396 for c in self.tag_disallowed:
397 397 if c in allchars:
398 398 raise util.Abort(_('%r cannot be used in a tag name') % c)
399 399
400 400 branches = self.branchmap()
401 401 for name in names:
402 402 self.hook('pretag', throw=True, node=hex(node), tag=name,
403 403 local=local)
404 404 if name in branches:
405 405 self.ui.warn(_("warning: tag %s conflicts with existing"
406 406 " branch name\n") % name)
407 407
408 408 def writetags(fp, names, munge, prevtags):
409 409 fp.seek(0, 2)
410 410 if prevtags and prevtags[-1] != '\n':
411 411 fp.write('\n')
412 412 for name in names:
413 413 m = munge and munge(name) or name
414 414 if (self._tagscache.tagtypes and
415 415 name in self._tagscache.tagtypes):
416 416 old = self.tags().get(name, nullid)
417 417 fp.write('%s %s\n' % (hex(old), m))
418 418 fp.write('%s %s\n' % (hex(node), m))
419 419 fp.close()
420 420
421 421 prevtags = ''
422 422 if local:
423 423 try:
424 424 fp = self.opener('localtags', 'r+')
425 425 except IOError:
426 426 fp = self.opener('localtags', 'a')
427 427 else:
428 428 prevtags = fp.read()
429 429
430 430 # local tags are stored in the current charset
431 431 writetags(fp, names, None, prevtags)
432 432 for name in names:
433 433 self.hook('tag', node=hex(node), tag=name, local=local)
434 434 return
435 435
436 436 try:
437 437 fp = self.wfile('.hgtags', 'rb+')
438 438 except IOError, e:
439 439 if e.errno != errno.ENOENT:
440 440 raise
441 441 fp = self.wfile('.hgtags', 'ab')
442 442 else:
443 443 prevtags = fp.read()
444 444
445 445 # committed tags are stored in UTF-8
446 446 writetags(fp, names, encoding.fromlocal, prevtags)
447 447
448 448 fp.close()
449 449
450 450 self.invalidatecaches()
451 451
452 452 if '.hgtags' not in self.dirstate:
453 453 self[None].add(['.hgtags'])
454 454
455 455 m = matchmod.exact(self.root, '', ['.hgtags'])
456 456 tagnode = self.commit(message, user, date, extra=extra, match=m)
457 457
458 458 for name in names:
459 459 self.hook('tag', node=hex(node), tag=name, local=local)
460 460
461 461 return tagnode
462 462
463 463 def tag(self, names, node, message, local, user, date):
464 464 '''tag a revision with one or more symbolic names.
465 465
466 466 names is a list of strings or, when adding a single tag, names may be a
467 467 string.
468 468
469 469 if local is True, the tags are stored in a per-repository file.
470 470 otherwise, they are stored in the .hgtags file, and a new
471 471 changeset is committed with the change.
472 472
473 473 keyword arguments:
474 474
475 475 local: whether to store tags in non-version-controlled file
476 476 (default False)
477 477
478 478 message: commit message to use if committing
479 479
480 480 user: name of user to use if committing
481 481
482 482 date: date tuple to use if committing'''
483 483
484 484 if not local:
485 485 for x in self.status()[:5]:
486 486 if '.hgtags' in x:
487 487 raise util.Abort(_('working copy of .hgtags is changed '
488 488 '(please commit .hgtags manually)'))
489 489
490 490 self.tags() # instantiate the cache
491 491 self._tag(names, node, message, local, user, date)
492 492
493 493 @propertycache
494 494 def _tagscache(self):
495 495 '''Returns a tagscache object that contains various tags related
496 496 caches.'''
497 497
498 498 # This simplifies its cache management by having one decorated
499 499 # function (this one) and the rest simply fetch things from it.
500 500 class tagscache(object):
501 501 def __init__(self):
502 502 # These two define the set of tags for this repository. tags
503 503 # maps tag name to node; tagtypes maps tag name to 'global' or
504 504 # 'local'. (Global tags are defined by .hgtags across all
505 505 # heads, and local tags are defined in .hg/localtags.)
506 506 # They constitute the in-memory cache of tags.
507 507 self.tags = self.tagtypes = None
508 508
509 509 self.nodetagscache = self.tagslist = None
510 510
511 511 cache = tagscache()
512 512 cache.tags, cache.tagtypes = self._findtags()
513 513
514 514 return cache
515 515
516 516 def tags(self):
517 517 '''return a mapping of tag to node'''
518 518 t = {}
519 for k, v in self._tagscache.tags.iteritems():
519 if self.changelog.filteredrevs:
520 tags, tt = self._findtags()
521 else:
522 tags = self._tagscache.tags
523 for k, v in tags.iteritems():
520 524 try:
521 525 # ignore tags to unknown nodes
522 526 self.changelog.rev(v)
523 527 t[k] = v
524 528 except (error.LookupError, ValueError):
525 529 pass
526 530 return t
527 531
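This hunk is the point of the changeset: _tagscache is computed from the
unfiltered changelog, so once changelog.filteredrevs is non-empty the cached
mapping may name changesets this repo view must not see, and tags are
recomputed through _findtags() instead. A sketch of the effect, treating
filteredrevs as a plain attribute as elsewhere in this series (rev choice
hypothetical):

    repo.changelog.filteredrevs = frozenset()     # nothing filtered
    cached = repo.tags()                          # served from _tagscache
    repo.changelog.filteredrevs = frozenset([0])  # hide rev 0
    recomputed = repo.tags()                      # bypasses the cache

branchmap() below already follows the same cache-bypass pattern for branch
heads.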
528 532 def _findtags(self):
529 533 '''Do the hard work of finding tags. Return a pair of dicts
530 534 (tags, tagtypes) where tags maps tag name to node, and tagtypes
531 535 maps tag name to a string like \'global\' or \'local\'.
532 536 Subclasses or extensions are free to add their own tags, but
533 537 should be aware that the returned dicts will be retained for the
534 538 duration of the localrepo object.'''
535 539
536 540 # XXX what tagtype should subclasses/extensions use? Currently
537 541 # mq and bookmarks add tags, but do not set the tagtype at all.
538 542 # Should each extension invent its own tag type? Should there
539 543 # be one tagtype for all such "virtual" tags? Or is the status
540 544 # quo fine?
541 545
542 546 alltags = {} # map tag name to (node, hist)
543 547 tagtypes = {}
544 548
545 549 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
546 550 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
547 551
548 552 # Build the return dicts. Have to re-encode tag names because
549 553 # the tags module always uses UTF-8 (in order not to lose info
550 554 # writing to the cache), but the rest of Mercurial wants them in
551 555 # local encoding.
552 556 tags = {}
553 557 for (name, (node, hist)) in alltags.iteritems():
554 558 if node != nullid:
555 559 tags[encoding.tolocal(name)] = node
556 560 tags['tip'] = self.changelog.tip()
557 561 tagtypes = dict([(encoding.tolocal(name), value)
558 562 for (name, value) in tagtypes.iteritems()])
559 563 return (tags, tagtypes)
560 564
561 565 def tagtype(self, tagname):
562 566 '''
563 567 return the type of the given tag. result can be:
564 568
565 569 'local' : a local tag
566 570 'global' : a global tag
567 571 None : tag does not exist
568 572 '''
569 573
570 574 return self._tagscache.tagtypes.get(tagname)
571 575
572 576 def tagslist(self):
573 577 '''return a list of tags ordered by revision'''
574 578 if not self._tagscache.tagslist:
575 579 l = []
576 580 for t, n in self.tags().iteritems():
577 581 r = self.changelog.rev(n)
578 582 l.append((r, t, n))
579 583 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
580 584
581 585 return self._tagscache.tagslist
582 586
583 587 def nodetags(self, node):
584 588 '''return the tags associated with a node'''
585 589 if not self._tagscache.nodetagscache:
586 590 nodetagscache = {}
587 591 for t, n in self._tagscache.tags.iteritems():
588 592 nodetagscache.setdefault(n, []).append(t)
589 593 for tags in nodetagscache.itervalues():
590 594 tags.sort()
591 595 self._tagscache.nodetagscache = nodetagscache
592 596 return self._tagscache.nodetagscache.get(node, [])
593 597
594 598 def nodebookmarks(self, node):
595 599 marks = []
596 600 for bookmark, n in self._bookmarks.iteritems():
597 601 if n == node:
598 602 marks.append(bookmark)
599 603 return sorted(marks)
600 604
601 605 def _branchtags(self, partial, lrev):
602 606 # TODO: rename this function?
603 607 tiprev = len(self) - 1
604 608 if lrev != tiprev:
605 609 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
606 610 self._updatebranchcache(partial, ctxgen)
607 611 self._writebranchcache(partial, self.changelog.tip(), tiprev)
608 612
609 613 return partial
610 614
611 615 def updatebranchcache(self):
612 616 tip = self.changelog.tip()
613 617 if self._branchcache is not None and self._branchcachetip == tip:
614 618 return
615 619
616 620 oldtip = self._branchcachetip
617 621 self._branchcachetip = tip
618 622 if oldtip is None or oldtip not in self.changelog.nodemap:
619 623 partial, last, lrev = self._readbranchcache()
620 624 else:
621 625 lrev = self.changelog.rev(oldtip)
622 626 partial = self._branchcache
623 627
624 628 self._branchtags(partial, lrev)
625 629 # this private cache holds all heads (not just the branch tips)
626 630 self._branchcache = partial
627 631
628 632 def branchmap(self):
629 633 '''returns a dictionary {branch: [branchheads]}'''
630 634 if self.changelog.filteredrevs:
631 635 # some changesets are excluded; we can't use the cache
632 636 branchmap = {}
633 637 self._updatebranchcache(branchmap, (self[r] for r in self))
634 638 return branchmap
635 639 else:
636 640 self.updatebranchcache()
637 641 return self._branchcache
638 642
639 643
640 644 def _branchtip(self, heads):
641 645 '''return the tipmost branch head in heads'''
642 646 tip = heads[-1]
643 647 for h in reversed(heads):
644 648 if not self[h].closesbranch():
645 649 tip = h
646 650 break
647 651 return tip
648 652
649 653 def branchtip(self, branch):
650 654 '''return the tip node for a given branch'''
651 655 if branch not in self.branchmap():
652 656 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
653 657 return self._branchtip(self.branchmap()[branch])
654 658
655 659 def branchtags(self):
656 660 '''return a dict where branch names map to the tipmost head of
657 661 the branch, open heads come before closed'''
658 662 bt = {}
659 663 for bn, heads in self.branchmap().iteritems():
660 664 bt[bn] = self._branchtip(heads)
661 665 return bt
662 666
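A sketch of the public branch helpers in use ('default' assumed to exist):

    node = repo.branchtip('default')  # raises RepoLookupError if unknown
    tips = repo.branchtags()          # {branch: tipmost head, open first}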
663 667 def _readbranchcache(self):
664 668 partial = {}
665 669 try:
666 670 f = self.opener("cache/branchheads")
667 671 lines = f.read().split('\n')
668 672 f.close()
669 673 except (IOError, OSError):
670 674 return {}, nullid, nullrev
671 675
672 676 try:
673 677 last, lrev = lines.pop(0).split(" ", 1)
674 678 last, lrev = bin(last), int(lrev)
675 679 if lrev >= len(self) or self[lrev].node() != last:
676 680 # invalidate the cache
677 681 raise ValueError('invalidating branch cache (tip differs)')
678 682 for l in lines:
679 683 if not l:
680 684 continue
681 685 node, label = l.split(" ", 1)
682 686 label = encoding.tolocal(label.strip())
683 687 if not node in self:
684 688 raise ValueError('invalidating branch cache because node '+
685 689 '%s does not exist' % node)
686 690 partial.setdefault(label, []).append(bin(node))
687 691 except KeyboardInterrupt:
688 692 raise
689 693 except Exception, inst:
690 694 if self.ui.debugflag:
691 695 self.ui.warn(str(inst), '\n')
692 696 partial, last, lrev = {}, nullid, nullrev
693 697 return partial, last, lrev
694 698
695 699 def _writebranchcache(self, branches, tip, tiprev):
696 700 try:
697 701 f = self.opener("cache/branchheads", "w", atomictemp=True)
698 702 f.write("%s %s\n" % (hex(tip), tiprev))
699 703 for label, nodes in branches.iteritems():
700 704 for node in nodes:
701 705 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
702 706 f.close()
703 707 except (IOError, OSError):
704 708 pass
705 709
706 710 def _updatebranchcache(self, partial, ctxgen):
707 711 """Given a branchhead cache, partial, that may have extra nodes or be
708 712 missing heads, and a generator of contexts covering at least the
709 713 missing heads, this function updates partial to be correct.
710 714 """
711 715 # collect new branch entries
712 716 newbranches = {}
713 717 for c in ctxgen:
714 718 newbranches.setdefault(c.branch(), []).append(c.node())
715 719 # if older branchheads are reachable from new ones, they aren't
716 720 # really branchheads. Note checking parents is insufficient:
717 721 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
718 722 for branch, newnodes in newbranches.iteritems():
719 723 bheads = partial.setdefault(branch, [])
720 724 # Remove candidate heads that no longer are in the repo (e.g., as
721 725 # the result of a strip that just happened). Avoid using 'node in
722 726 # self' here because that dives down into branchcache code somewhat
723 727 # recursively.
724 728 bheadrevs = [self.changelog.rev(node) for node in bheads
725 729 if self.changelog.hasnode(node)]
726 730 newheadrevs = [self.changelog.rev(node) for node in newnodes
727 731 if self.changelog.hasnode(node)]
728 732 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
729 733 # Remove duplicates - nodes that are in newheadrevs and are already
730 734 # in bheadrevs. This can happen if you strip a node whose parent
731 735 # was already a head (because they're on different branches).
732 736 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
733 737
734 738 # Starting from tip means fewer passes over reachable. If we know
735 739 # the new candidates are not ancestors of existing heads, we don't
736 740 # have to examine ancestors of existing heads
737 741 if ctxisnew:
738 742 iterrevs = sorted(newheadrevs)
739 743 else:
740 744 iterrevs = list(bheadrevs)
741 745
742 746 # This loop prunes out two kinds of heads - heads that are
743 747 # superseded by a head in newheadrevs, and newheadrevs that are not
744 748 # heads because an existing head is their descendant.
745 749 while iterrevs:
746 750 latest = iterrevs.pop()
747 751 if latest not in bheadrevs:
748 752 continue
749 753 ancestors = set(self.changelog.ancestors([latest],
750 754 bheadrevs[0]))
751 755 if ancestors:
752 756 bheadrevs = [b for b in bheadrevs if b not in ancestors]
753 757 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
754 758
755 759 # There may be branches that cease to exist when the last commit in the
756 760 # branch was stripped. This code filters them out. Note that the
757 761 # branch that ceased to exist may not be in newbranches because
758 762 # newbranches is the set of candidate heads, which when you strip the
759 763 # last commit in a branch will be the parent branch.
760 764 for branch in partial.keys():
761 765 nodes = [head for head in partial[branch]
762 766 if self.changelog.hasnode(head)]
763 767 if not nodes:
764 768 del partial[branch]
765 769
766 770 def lookup(self, key):
767 771 return self[key].node()
768 772
769 773 def lookupbranch(self, key, remote=None):
770 774 repo = remote or self
771 775 if key in repo.branchmap():
772 776 return key
773 777
774 778 repo = (remote and remote.local()) and remote or self
775 779 return repo[key].branch()
776 780
777 781 def known(self, nodes):
778 782 nm = self.changelog.nodemap
779 783 pc = self._phasecache
780 784 result = []
781 785 for n in nodes:
782 786 r = nm.get(n)
783 787 resp = not (r is None or pc.phase(self, r) >= phases.secret)
784 788 result.append(resp)
785 789 return result
786 790
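known() is the discovery primitive behind the wire protocol: it reports
membership while hiding secret-phase changesets. A sketch (the second node
is a made-up value not present in the repo):

    tip = repo['tip'].node()
    bogus = '\x01' * 20
    repo.known([tip, bogus])  # -> [True, False] while tip is not secret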
787 791 def local(self):
788 792 return self
789 793
790 794 def cancopy(self):
791 795 return self.local() # so statichttprepo's override of local() works
792 796
793 797 def join(self, f):
794 798 return os.path.join(self.path, f)
795 799
796 800 def wjoin(self, f):
797 801 return os.path.join(self.root, f)
798 802
799 803 def file(self, f):
800 804 if f[0] == '/':
801 805 f = f[1:]
802 806 return filelog.filelog(self.sopener, f)
803 807
804 808 def changectx(self, changeid):
805 809 return self[changeid]
806 810
807 811 def parents(self, changeid=None):
808 812 '''get list of changectxs for parents of changeid'''
809 813 return self[changeid].parents()
810 814
811 815 def setparents(self, p1, p2=nullid):
812 816 copies = self.dirstate.setparents(p1, p2)
813 817 if copies:
814 818 # Adjust copy records, the dirstate cannot do it, it
815 819 # requires access to parents manifests. Preserve them
816 820 # only for entries added to first parent.
817 821 pctx = self[p1]
818 822 for f in copies:
819 823 if f not in pctx and copies[f] in pctx:
820 824 self.dirstate.copy(copies[f], f)
821 825
822 826 def filectx(self, path, changeid=None, fileid=None):
823 827 """changeid can be a changeset revision, node, or tag.
824 828 fileid can be a file revision or node."""
825 829 return context.filectx(self, path, changeid, fileid)
826 830
827 831 def getcwd(self):
828 832 return self.dirstate.getcwd()
829 833
830 834 def pathto(self, f, cwd=None):
831 835 return self.dirstate.pathto(f, cwd)
832 836
833 837 def wfile(self, f, mode='r'):
834 838 return self.wopener(f, mode)
835 839
836 840 def _link(self, f):
837 841 return os.path.islink(self.wjoin(f))
838 842
839 843 def _loadfilter(self, filter):
840 844 if filter not in self.filterpats:
841 845 l = []
842 846 for pat, cmd in self.ui.configitems(filter):
843 847 if cmd == '!':
844 848 continue
845 849 mf = matchmod.match(self.root, '', [pat])
846 850 fn = None
847 851 params = cmd
848 852 for name, filterfn in self._datafilters.iteritems():
849 853 if cmd.startswith(name):
850 854 fn = filterfn
851 855 params = cmd[len(name):].lstrip()
852 856 break
853 857 if not fn:
854 858 fn = lambda s, c, **kwargs: util.filter(s, c)
855 859 # Wrap old filters not supporting keyword arguments
856 860 if not inspect.getargspec(fn)[2]:
857 861 oldfn = fn
858 862 fn = lambda s, c, **kwargs: oldfn(s, c)
859 863 l.append((mf, fn, params))
860 864 self.filterpats[filter] = l
861 865 return self.filterpats[filter]
862 866
863 867 def _filter(self, filterpats, filename, data):
864 868 for mf, fn, cmd in filterpats:
865 869 if mf(filename):
866 870 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
867 871 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
868 872 break
869 873
870 874 return data
871 875
872 876 @propertycache
873 877 def _encodefilterpats(self):
874 878 return self._loadfilter('encode')
875 879
876 880 @propertycache
877 881 def _decodefilterpats(self):
878 882 return self._loadfilter('decode')
879 883
880 884 def adddatafilter(self, name, filter):
881 885 self._datafilters[name] = filter
882 886
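adddatafilter() registers a named Python filter that _loadfilter() above
prefers over shelling out to an external command; a sketch with a
hypothetical filter name:

    def upper(s, params, **kwargs):
        return s.upper()
    repo.adddatafilter('upper:', upper)
    # picked when an [encode]/[decode] hgrc rule starts with 'upper:', e.g.
    #   [encode]
    #   **.txt = upper: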
883 887 def wread(self, filename):
884 888 if self._link(filename):
885 889 data = os.readlink(self.wjoin(filename))
886 890 else:
887 891 data = self.wopener.read(filename)
888 892 return self._filter(self._encodefilterpats, filename, data)
889 893
890 894 def wwrite(self, filename, data, flags):
891 895 data = self._filter(self._decodefilterpats, filename, data)
892 896 if 'l' in flags:
893 897 self.wopener.symlink(data, filename)
894 898 else:
895 899 self.wopener.write(filename, data)
896 900 if 'x' in flags:
897 901 util.setflags(self.wjoin(filename), False, True)
898 902
899 903 def wwritedata(self, filename, data):
900 904 return self._filter(self._decodefilterpats, filename, data)
901 905
902 906 def transaction(self, desc):
903 907 tr = self._transref and self._transref() or None
904 908 if tr and tr.running():
905 909 return tr.nest()
906 910
907 911 # abort here if the journal already exists
908 912 if os.path.exists(self.sjoin("journal")):
909 913 raise error.RepoError(
910 914 _("abandoned transaction found - run hg recover"))
911 915
912 916 self._writejournal(desc)
913 917 renames = [(x, undoname(x)) for x in self._journalfiles()]
914 918
915 919 tr = transaction.transaction(self.ui.warn, self.sopener,
916 920 self.sjoin("journal"),
917 921 aftertrans(renames),
918 922 self.store.createmode)
919 923 self._transref = weakref.ref(tr)
920 924 return tr
921 925
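The usual calling pattern, sketched: take the store lock first, then either
tr.close() to commit the transaction or tr.release() to abort it and roll
the journal back:

    lock = repo.lock()
    try:
        tr = repo.transaction('hypothetical-operation')
        try:
            pass  # ... append to revlogs through tr ...
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()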
922 926 def _journalfiles(self):
923 927 return (self.sjoin('journal'), self.join('journal.dirstate'),
924 928 self.join('journal.branch'), self.join('journal.desc'),
925 929 self.join('journal.bookmarks'),
926 930 self.sjoin('journal.phaseroots'))
927 931
928 932 def undofiles(self):
929 933 return [undoname(x) for x in self._journalfiles()]
930 934
931 935 def _writejournal(self, desc):
932 936 self.opener.write("journal.dirstate",
933 937 self.opener.tryread("dirstate"))
934 938 self.opener.write("journal.branch",
935 939 encoding.fromlocal(self.dirstate.branch()))
936 940 self.opener.write("journal.desc",
937 941 "%d\n%s\n" % (len(self), desc))
938 942 self.opener.write("journal.bookmarks",
939 943 self.opener.tryread("bookmarks"))
940 944 self.sopener.write("journal.phaseroots",
941 945 self.sopener.tryread("phaseroots"))
942 946
943 947 def recover(self):
944 948 lock = self.lock()
945 949 try:
946 950 if os.path.exists(self.sjoin("journal")):
947 951 self.ui.status(_("rolling back interrupted transaction\n"))
948 952 transaction.rollback(self.sopener, self.sjoin("journal"),
949 953 self.ui.warn)
950 954 self.invalidate()
951 955 return True
952 956 else:
953 957 self.ui.warn(_("no interrupted transaction available\n"))
954 958 return False
955 959 finally:
956 960 lock.release()
957 961
958 962 def rollback(self, dryrun=False, force=False):
959 963 wlock = lock = None
960 964 try:
961 965 wlock = self.wlock()
962 966 lock = self.lock()
963 967 if os.path.exists(self.sjoin("undo")):
964 968 return self._rollback(dryrun, force)
965 969 else:
966 970 self.ui.warn(_("no rollback information available\n"))
967 971 return 1
968 972 finally:
969 973 release(lock, wlock)
970 974
971 975 def _rollback(self, dryrun, force):
972 976 ui = self.ui
973 977 try:
974 978 args = self.opener.read('undo.desc').splitlines()
975 979 (oldlen, desc, detail) = (int(args[0]), args[1], None)
976 980 if len(args) >= 3:
977 981 detail = args[2]
978 982 oldtip = oldlen - 1
979 983
980 984 if detail and ui.verbose:
981 985 msg = (_('repository tip rolled back to revision %s'
982 986 ' (undo %s: %s)\n')
983 987 % (oldtip, desc, detail))
984 988 else:
985 989 msg = (_('repository tip rolled back to revision %s'
986 990 ' (undo %s)\n')
987 991 % (oldtip, desc))
988 992 except IOError:
989 993 msg = _('rolling back unknown transaction\n')
990 994 desc = None
991 995
992 996 if not force and self['.'] != self['tip'] and desc == 'commit':
993 997 raise util.Abort(
994 998 _('rollback of last commit while not checked out '
995 999 'may lose data'), hint=_('use -f to force'))
996 1000
997 1001 ui.status(msg)
998 1002 if dryrun:
999 1003 return 0
1000 1004
1001 1005 parents = self.dirstate.parents()
1002 1006 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1003 1007 if os.path.exists(self.join('undo.bookmarks')):
1004 1008 util.rename(self.join('undo.bookmarks'),
1005 1009 self.join('bookmarks'))
1006 1010 if os.path.exists(self.sjoin('undo.phaseroots')):
1007 1011 util.rename(self.sjoin('undo.phaseroots'),
1008 1012 self.sjoin('phaseroots'))
1009 1013 self.invalidate()
1010 1014
1011 1015 # Discard all cache entries to force reloading everything.
1012 1016 self._filecache.clear()
1013 1017
1014 1018 parentgone = (parents[0] not in self.changelog.nodemap or
1015 1019 parents[1] not in self.changelog.nodemap)
1016 1020 if parentgone:
1017 1021 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1018 1022 try:
1019 1023 branch = self.opener.read('undo.branch')
1020 1024 self.dirstate.setbranch(encoding.tolocal(branch))
1021 1025 except IOError:
1022 1026 ui.warn(_('named branch could not be reset: '
1023 1027 'current branch is still \'%s\'\n')
1024 1028 % self.dirstate.branch())
1025 1029
1026 1030 self.dirstate.invalidate()
1027 1031 parents = tuple([p.rev() for p in self.parents()])
1028 1032 if len(parents) > 1:
1029 1033 ui.status(_('working directory now based on '
1030 1034 'revisions %d and %d\n') % parents)
1031 1035 else:
1032 1036 ui.status(_('working directory now based on '
1033 1037 'revision %d\n') % parents)
1034 1038 # TODO: if we know which new heads may result from this rollback, pass
1035 1039 # them to destroy(), which will prevent the branchhead cache from being
1036 1040 # invalidated.
1037 1041 self.destroyed()
1038 1042 return 0
1039 1043
1040 1044 def invalidatecaches(self):
1041 1045 def delcache(name):
1042 1046 try:
1043 1047 delattr(self, name)
1044 1048 except AttributeError:
1045 1049 pass
1046 1050
1047 1051 delcache('_tagscache')
1048 1052
1049 1053 self._branchcache = None # in UTF-8
1050 1054 self._branchcachetip = None
1051 1055 obsolete.clearobscaches(self)
1052 1056
1053 1057 def invalidatedirstate(self):
1054 1058 '''Invalidates the dirstate, causing the next call to dirstate
1055 1059 to check if it was modified since the last time it was read,
1056 1060 rereading it if it has.
1057 1061
1058 1062 This differs from dirstate.invalidate() in that it doesn't always
1059 1063 reread the dirstate. Use dirstate.invalidate() if you want to
1060 1064 explicitly read the dirstate again (i.e. restore it to a previously
1061 1065 known good state).'''
1062 1066 if 'dirstate' in self.__dict__:
1063 1067 for k in self.dirstate._filecache:
1064 1068 try:
1065 1069 delattr(self.dirstate, k)
1066 1070 except AttributeError:
1067 1071 pass
1068 1072 delattr(self, 'dirstate')
1069 1073
1070 1074 def invalidate(self):
1071 1075 for k in self._filecache:
1072 1076 # dirstate is invalidated separately in invalidatedirstate()
1073 1077 if k == 'dirstate':
1074 1078 continue
1075 1079
1076 1080 try:
1077 1081 delattr(self, k)
1078 1082 except AttributeError:
1079 1083 pass
1080 1084 self.invalidatecaches()
1081 1085
1082 1086 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1083 1087 try:
1084 1088 l = lock.lock(lockname, 0, releasefn, desc=desc)
1085 1089 except error.LockHeld, inst:
1086 1090 if not wait:
1087 1091 raise
1088 1092 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1089 1093 (desc, inst.locker))
1090 1094 # default to 600 seconds timeout
1091 1095 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1092 1096 releasefn, desc=desc)
1093 1097 if acquirefn:
1094 1098 acquirefn()
1095 1099 return l
1096 1100
1097 1101 def _afterlock(self, callback):
1098 1102 """add a callback to the current repository lock.
1099 1103
1100 1104 The callback will be executed on lock release."""
1101 1105 l = self._lockref and self._lockref()
1102 1106 if l:
1103 1107 l.postrelease.append(callback)
1104 1108 else:
1105 1109 callback()
1106 1110
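commit() below uses this to fire its 'commit' hook only once the store lock
is released; a sketch (callback body hypothetical):

    def callback():
        repo.ui.status('lock released\n')
    repo._afterlock(callback)  # runs immediately if no lock is held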
1107 1111 def lock(self, wait=True):
1108 1112 '''Lock the repository store (.hg/store) and return a weak reference
1109 1113 to the lock. Use this before modifying the store (e.g. committing or
1110 1114 stripping). If you are opening a transaction, get a lock as well.'''
1111 1115 l = self._lockref and self._lockref()
1112 1116 if l is not None and l.held:
1113 1117 l.lock()
1114 1118 return l
1115 1119
1116 1120 def unlock():
1117 1121 self.store.write()
1118 1122 if '_phasecache' in vars(self):
1119 1123 self._phasecache.write()
1120 1124 for k, ce in self._filecache.items():
1121 1125 if k == 'dirstate':
1122 1126 continue
1123 1127 ce.refresh()
1124 1128
1125 1129 l = self._lock(self.sjoin("lock"), wait, unlock,
1126 1130 self.invalidate, _('repository %s') % self.origroot)
1127 1131 self._lockref = weakref.ref(l)
1128 1132 return l
1129 1133
1130 1134 def wlock(self, wait=True):
1131 1135 '''Lock the non-store parts of the repository (everything under
1132 1136 .hg except .hg/store) and return a weak reference to the lock.
1133 1137 Use this before modifying files in .hg.'''
1134 1138 l = self._wlockref and self._wlockref()
1135 1139 if l is not None and l.held:
1136 1140 l.lock()
1137 1141 return l
1138 1142
1139 1143 def unlock():
1140 1144 self.dirstate.write()
1141 1145 ce = self._filecache.get('dirstate')
1142 1146 if ce:
1143 1147 ce.refresh()
1144 1148
1145 1149 l = self._lock(self.join("wlock"), wait, unlock,
1146 1150 self.invalidatedirstate, _('working directory of %s') %
1147 1151 self.origroot)
1148 1152 self._wlockref = weakref.ref(l)
1149 1153 return l
1150 1154
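When both locks are needed (as in rollback() above), wlock must be acquired
before lock to avoid deadlocks between concurrent processes; a sketch:

    wlock = repo.wlock()
    try:
        lock = repo.lock()
        try:
            pass  # ... modify store and working-directory state ...
        finally:
            lock.release()
    finally:
        wlock.release()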
1151 1155 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1152 1156 """
1153 1157 commit an individual file as part of a larger transaction
1154 1158 """
1155 1159
1156 1160 fname = fctx.path()
1157 1161 text = fctx.data()
1158 1162 flog = self.file(fname)
1159 1163 fparent1 = manifest1.get(fname, nullid)
1160 1164 fparent2 = fparent2o = manifest2.get(fname, nullid)
1161 1165
1162 1166 meta = {}
1163 1167 copy = fctx.renamed()
1164 1168 if copy and copy[0] != fname:
1165 1169 # Mark the new revision of this file as a copy of another
1166 1170 # file. This copy data will effectively act as a parent
1167 1171 # of this new revision. If this is a merge, the first
1168 1172 # parent will be the nullid (meaning "look up the copy data")
1169 1173 # and the second one will be the other parent. For example:
1170 1174 #
1171 1175 # 0 --- 1 --- 3 rev1 changes file foo
1172 1176 # \ / rev2 renames foo to bar and changes it
1173 1177 # \- 2 -/ rev3 should have bar with all changes and
1174 1178 # should record that bar descends from
1175 1179 # bar in rev2 and foo in rev1
1176 1180 #
1177 1181 # this allows this merge to succeed:
1178 1182 #
1179 1183 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1180 1184 # \ / merging rev3 and rev4 should use bar@rev2
1181 1185 # \- 2 --- 4 as the merge base
1182 1186 #
1183 1187
1184 1188 cfname = copy[0]
1185 1189 crev = manifest1.get(cfname)
1186 1190 newfparent = fparent2
1187 1191
1188 1192 if manifest2: # branch merge
1189 1193 if fparent2 == nullid or crev is None: # copied on remote side
1190 1194 if cfname in manifest2:
1191 1195 crev = manifest2[cfname]
1192 1196 newfparent = fparent1
1193 1197
1194 1198 # find source in nearest ancestor if we've lost track
1195 1199 if not crev:
1196 1200 self.ui.debug(" %s: searching for copy revision for %s\n" %
1197 1201 (fname, cfname))
1198 1202 for ancestor in self[None].ancestors():
1199 1203 if cfname in ancestor:
1200 1204 crev = ancestor[cfname].filenode()
1201 1205 break
1202 1206
1203 1207 if crev:
1204 1208 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1205 1209 meta["copy"] = cfname
1206 1210 meta["copyrev"] = hex(crev)
1207 1211 fparent1, fparent2 = nullid, newfparent
1208 1212 else:
1209 1213 self.ui.warn(_("warning: can't find ancestor for '%s' "
1210 1214 "copied from '%s'!\n") % (fname, cfname))
1211 1215
1212 1216 elif fparent2 != nullid:
1213 1217 # is one parent an ancestor of the other?
1214 1218 fparentancestor = flog.ancestor(fparent1, fparent2)
1215 1219 if fparentancestor == fparent1:
1216 1220 fparent1, fparent2 = fparent2, nullid
1217 1221 elif fparentancestor == fparent2:
1218 1222 fparent2 = nullid
1219 1223
1220 1224 # is the file changed?
1221 1225 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1222 1226 changelist.append(fname)
1223 1227 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1224 1228
1225 1229 # are just the flags changed during merge?
1226 1230 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1227 1231 changelist.append(fname)
1228 1232
1229 1233 return fparent1
1230 1234
1231 1235 def commit(self, text="", user=None, date=None, match=None, force=False,
1232 1236 editor=False, extra={}):
1233 1237 """Add a new revision to current repository.
1234 1238
1235 1239 Revision information is gathered from the working directory,
1236 1240 match can be used to filter the committed files. If editor is
1237 1241 supplied, it is called to get a commit message.
1238 1242 """
1239 1243
1240 1244 def fail(f, msg):
1241 1245 raise util.Abort('%s: %s' % (f, msg))
1242 1246
1243 1247 if not match:
1244 1248 match = matchmod.always(self.root, '')
1245 1249
1246 1250 if not force:
1247 1251 vdirs = []
1248 1252 match.dir = vdirs.append
1249 1253 match.bad = fail
1250 1254
1251 1255 wlock = self.wlock()
1252 1256 try:
1253 1257 wctx = self[None]
1254 1258 merge = len(wctx.parents()) > 1
1255 1259
1256 1260 if (not force and merge and match and
1257 1261 (match.files() or match.anypats())):
1258 1262 raise util.Abort(_('cannot partially commit a merge '
1259 1263 '(do not specify files or patterns)'))
1260 1264
1261 1265 changes = self.status(match=match, clean=force)
1262 1266 if force:
1263 1267 changes[0].extend(changes[6]) # mq may commit unchanged files
1264 1268
1265 1269 # check subrepos
1266 1270 subs = []
1267 1271 commitsubs = set()
1268 1272 newstate = wctx.substate.copy()
1269 1273 # only manage subrepos and .hgsubstate if .hgsub is present
1270 1274 if '.hgsub' in wctx:
1271 1275 # we'll decide whether to track this ourselves, thanks
1272 1276 if '.hgsubstate' in changes[0]:
1273 1277 changes[0].remove('.hgsubstate')
1274 1278 if '.hgsubstate' in changes[2]:
1275 1279 changes[2].remove('.hgsubstate')
1276 1280
1277 1281 # compare current state to last committed state
1278 1282 # build new substate based on last committed state
1279 1283 oldstate = wctx.p1().substate
1280 1284 for s in sorted(newstate.keys()):
1281 1285 if not match(s):
1282 1286 # ignore working copy, use old state if present
1283 1287 if s in oldstate:
1284 1288 newstate[s] = oldstate[s]
1285 1289 continue
1286 1290 if not force:
1287 1291 raise util.Abort(
1288 1292 _("commit with new subrepo %s excluded") % s)
1289 1293 if wctx.sub(s).dirty(True):
1290 1294 if not self.ui.configbool('ui', 'commitsubrepos'):
1291 1295 raise util.Abort(
1292 1296 _("uncommitted changes in subrepo %s") % s,
1293 1297 hint=_("use --subrepos for recursive commit"))
1294 1298 subs.append(s)
1295 1299 commitsubs.add(s)
1296 1300 else:
1297 1301 bs = wctx.sub(s).basestate()
1298 1302 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1299 1303 if oldstate.get(s, (None, None, None))[1] != bs:
1300 1304 subs.append(s)
1301 1305
1302 1306 # check for removed subrepos
1303 1307 for p in wctx.parents():
1304 1308 r = [s for s in p.substate if s not in newstate]
1305 1309 subs += [s for s in r if match(s)]
1306 1310 if subs:
1307 1311 if (not match('.hgsub') and
1308 1312 '.hgsub' in (wctx.modified() + wctx.added())):
1309 1313 raise util.Abort(
1310 1314 _("can't commit subrepos without .hgsub"))
1311 1315 changes[0].insert(0, '.hgsubstate')
1312 1316
1313 1317 elif '.hgsub' in changes[2]:
1314 1318 # clean up .hgsubstate when .hgsub is removed
1315 1319 if ('.hgsubstate' in wctx and
1316 1320 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1317 1321 changes[2].insert(0, '.hgsubstate')
1318 1322
1319 1323 # make sure all explicit patterns are matched
1320 1324 if not force and match.files():
1321 1325 matched = set(changes[0] + changes[1] + changes[2])
1322 1326
1323 1327 for f in match.files():
1324 1328 f = self.dirstate.normalize(f)
1325 1329 if f == '.' or f in matched or f in wctx.substate:
1326 1330 continue
1327 1331 if f in changes[3]: # missing
1328 1332 fail(f, _('file not found!'))
1329 1333 if f in vdirs: # visited directory
1330 1334 d = f + '/'
1331 1335 for mf in matched:
1332 1336 if mf.startswith(d):
1333 1337 break
1334 1338 else:
1335 1339 fail(f, _("no match under directory!"))
1336 1340 elif f not in self.dirstate:
1337 1341 fail(f, _("file not tracked!"))
1338 1342
1339 1343 if (not force and not extra.get("close") and not merge
1340 1344 and not (changes[0] or changes[1] or changes[2])
1341 1345 and wctx.branch() == wctx.p1().branch()):
1342 1346 return None
1343 1347
1344 1348 if merge and changes[3]:
1345 1349 raise util.Abort(_("cannot commit merge with missing files"))
1346 1350
1347 1351 ms = mergemod.mergestate(self)
1348 1352 for f in changes[0]:
1349 1353 if f in ms and ms[f] == 'u':
1350 1354 raise util.Abort(_("unresolved merge conflicts "
1351 1355 "(see hg help resolve)"))
1352 1356
1353 1357 cctx = context.workingctx(self, text, user, date, extra, changes)
1354 1358 if editor:
1355 1359 cctx._text = editor(self, cctx, subs)
1356 1360 edited = (text != cctx._text)
1357 1361
1358 1362 # commit subs and write new state
1359 1363 if subs:
1360 1364 for s in sorted(commitsubs):
1361 1365 sub = wctx.sub(s)
1362 1366 self.ui.status(_('committing subrepository %s\n') %
1363 1367 subrepo.subrelpath(sub))
1364 1368 sr = sub.commit(cctx._text, user, date)
1365 1369 newstate[s] = (newstate[s][0], sr)
1366 1370 subrepo.writestate(self, newstate)
1367 1371
1368 1372 # Save commit message in case this transaction gets rolled back
1369 1373 # (e.g. by a pretxncommit hook). Leave the content alone on
1370 1374 # the assumption that the user will use the same editor again.
1371 1375 msgfn = self.savecommitmessage(cctx._text)
1372 1376
1373 1377 p1, p2 = self.dirstate.parents()
1374 1378 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1375 1379 try:
1376 1380 self.hook("precommit", throw=True, parent1=hookp1,
1377 1381 parent2=hookp2)
1378 1382 ret = self.commitctx(cctx, True)
1379 1383 except: # re-raises
1380 1384 if edited:
1381 1385 self.ui.write(
1382 1386 _('note: commit message saved in %s\n') % msgfn)
1383 1387 raise
1384 1388
1385 1389 # update bookmarks, dirstate and mergestate
1386 1390 bookmarks.update(self, [p1, p2], ret)
1387 1391 for f in changes[0] + changes[1]:
1388 1392 self.dirstate.normal(f)
1389 1393 for f in changes[2]:
1390 1394 self.dirstate.drop(f)
1391 1395 self.dirstate.setparents(ret)
1392 1396 ms.reset()
1393 1397 finally:
1394 1398 wlock.release()
1395 1399
1396 1400 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1397 1401 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1398 1402 self._afterlock(commithook)
1399 1403 return ret
1400 1404
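A sketch of a programmatic commit restricted to one file, using the same
exact-matcher style as _tag() above (file name and user hypothetical):

    from mercurial import match as matchmod
    m = matchmod.exact(repo.root, '', ['README'])
    node = repo.commit('update README', user='someone', match=m)
    # node is None when there was nothing to commit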
1401 1405 def commitctx(self, ctx, error=False):
1402 1406 """Add a new revision to current repository.
1403 1407 Revision information is passed via the context argument.
1404 1408 """
1405 1409
1406 1410 tr = lock = None
1407 1411 removed = list(ctx.removed())
1408 1412 p1, p2 = ctx.p1(), ctx.p2()
1409 1413 user = ctx.user()
1410 1414
1411 1415 lock = self.lock()
1412 1416 try:
1413 1417 tr = self.transaction("commit")
1414 1418 trp = weakref.proxy(tr)
1415 1419
1416 1420 if ctx.files():
1417 1421 m1 = p1.manifest().copy()
1418 1422 m2 = p2.manifest()
1419 1423
1420 1424 # check in files
1421 1425 new = {}
1422 1426 changed = []
1423 1427 linkrev = len(self)
1424 1428 for f in sorted(ctx.modified() + ctx.added()):
1425 1429 self.ui.note(f + "\n")
1426 1430 try:
1427 1431 fctx = ctx[f]
1428 1432 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1429 1433 changed)
1430 1434 m1.set(f, fctx.flags())
1431 1435 except OSError, inst:
1432 1436 self.ui.warn(_("trouble committing %s!\n") % f)
1433 1437 raise
1434 1438 except IOError, inst:
1435 1439 errcode = getattr(inst, 'errno', errno.ENOENT)
1436 1440 if error or errcode and errcode != errno.ENOENT:
1437 1441 self.ui.warn(_("trouble committing %s!\n") % f)
1438 1442 raise
1439 1443 else:
1440 1444 removed.append(f)
1441 1445
1442 1446 # update manifest
1443 1447 m1.update(new)
1444 1448 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1445 1449 drop = [f for f in removed if f in m1]
1446 1450 for f in drop:
1447 1451 del m1[f]
1448 1452 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1449 1453 p2.manifestnode(), (new, drop))
1450 1454 files = changed + removed
1451 1455 else:
1452 1456 mn = p1.manifestnode()
1453 1457 files = []
1454 1458
1455 1459 # update changelog
1456 1460 self.changelog.delayupdate()
1457 1461 n = self.changelog.add(mn, files, ctx.description(),
1458 1462 trp, p1.node(), p2.node(),
1459 1463 user, ctx.date(), ctx.extra().copy())
1460 1464 p = lambda: self.changelog.writepending() and self.root or ""
1461 1465 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1462 1466 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1463 1467 parent2=xp2, pending=p)
1464 1468 self.changelog.finalize(trp)
1465 1469 # set the new commit in its proper phase
1466 1470 targetphase = phases.newcommitphase(self.ui)
1467 1471 if targetphase:
1468 1472 # retracting the boundary does not alter parent changesets.
1469 1473 # if a parent has a higher phase, the resulting phase will
1470 1474 # be compliant anyway
1471 1475 #
1472 1476 # if the minimal phase was 0, we don't need to retract anything
1473 1477 phases.retractboundary(self, targetphase, [n])
1474 1478 tr.close()
1475 1479 self.updatebranchcache()
1476 1480 return n
1477 1481 finally:
1478 1482 if tr:
1479 1483 tr.release()
1480 1484 lock.release()
1481 1485
1482 1486 def destroyed(self, newheadnodes=None):
1483 1487 '''Inform the repository that nodes have been destroyed.
1484 1488 Intended for use by strip and rollback, so there's a common
1485 1489 place for anything that has to be done after destroying history.
1486 1490
1487 1491 If you know the branchhead cache was up to date before nodes were removed
1488 1492 and you also know the set of candidate new heads that may have resulted
1489 1493 from the destruction, you can set newheadnodes. This will enable the
1490 1494 code to update the branchheads cache, rather than having future code
1491 1495 decide it's invalid and regenerating it from scratch.
1492 1496 '''
1493 1497 # If we have info (newheadnodes) on how to update the branch cache, do
1494 1498 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1495 1499 # will be caught the next time it is read.
1496 1500 if newheadnodes:
1497 1501 tiprev = len(self) - 1
1498 1502 ctxgen = (self[node] for node in newheadnodes
1499 1503 if self.changelog.hasnode(node))
1500 1504 self._updatebranchcache(self._branchcache, ctxgen)
1501 1505 self._writebranchcache(self._branchcache, self.changelog.tip(),
1502 1506 tiprev)
1503 1507
1504 1508 # Ensure the persistent tag cache is updated. Doing it now
1505 1509 # means that the tag cache only has to worry about destroyed
1506 1510 # heads immediately after a strip/rollback. That in turn
1507 1511 # guarantees that "cachetip == currenttip" (comparing both rev
1508 1512 # and node) always means no nodes have been added or destroyed.
1509 1513
1510 1514 # XXX this is suboptimal when qrefresh'ing: we strip the current
1511 1515 # head, refresh the tag cache, then immediately add a new head.
1512 1516 # But I think doing it this way is necessary for the "instant
1513 1517 # tag cache retrieval" case to work.
1514 1518 self.invalidatecaches()
1515 1519
1516 1520 # Discard all cache entries to force reloading everything.
1517 1521 self._filecache.clear()
1518 1522
1519 1523 def walk(self, match, node=None):
1520 1524 '''
1521 1525 walk recursively through the directory tree or a given
1522 1526 changeset, finding all files matched by the match
1523 1527 function
1524 1528 '''
1525 1529 return self[node].walk(match)
1526 1530
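A sketch of walking a changeset with a glob matcher (pattern hypothetical):

    from mercurial import match as matchmod
    m = matchmod.match(repo.root, '', ['glob:**.py'])
    for f in repo.walk(m, node='tip'):
        repo.ui.write(f + '\n')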
1527 1531 def status(self, node1='.', node2=None, match=None,
1528 1532 ignored=False, clean=False, unknown=False,
1529 1533 listsubrepos=False):
1530 1534 """return status of files between two nodes or node and working
1531 1535 directory.
1532 1536
1533 1537 If node1 is None, use the first dirstate parent instead.
1534 1538 If node2 is None, compare node1 with working directory.
1535 1539 """
1536 1540
1537 1541 def mfmatches(ctx):
1538 1542 mf = ctx.manifest().copy()
1539 1543 if match.always():
1540 1544 return mf
1541 1545 for fn in mf.keys():
1542 1546 if not match(fn):
1543 1547 del mf[fn]
1544 1548 return mf
1545 1549
1546 1550 if isinstance(node1, context.changectx):
1547 1551 ctx1 = node1
1548 1552 else:
1549 1553 ctx1 = self[node1]
1550 1554 if isinstance(node2, context.changectx):
1551 1555 ctx2 = node2
1552 1556 else:
1553 1557 ctx2 = self[node2]
1554 1558
1555 1559 working = ctx2.rev() is None
1556 1560 parentworking = working and ctx1 == self['.']
1557 1561 match = match or matchmod.always(self.root, self.getcwd())
1558 1562 listignored, listclean, listunknown = ignored, clean, unknown
1559 1563
1560 1564 # load earliest manifest first for caching reasons
1561 1565 if not working and ctx2.rev() < ctx1.rev():
1562 1566 ctx2.manifest()
1563 1567
1564 1568 if not parentworking:
1565 1569 def bad(f, msg):
1566 1570 # 'f' may be a directory pattern from 'match.files()',
1567 1571 # so 'f not in ctx1' is not enough
1568 1572 if f not in ctx1 and f not in ctx1.dirs():
1569 1573 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1570 1574 match.bad = bad
1571 1575
1572 1576 if working: # we need to scan the working dir
1573 1577 subrepos = []
1574 1578 if '.hgsub' in self.dirstate:
1575 1579 subrepos = ctx2.substate.keys()
1576 1580 s = self.dirstate.status(match, subrepos, listignored,
1577 1581 listclean, listunknown)
1578 1582 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1579 1583
1580 1584 # check for any possibly clean files
1581 1585 if parentworking and cmp:
1582 1586 fixup = []
1583 1587 # do a full compare of any files that might have changed
1584 1588 for f in sorted(cmp):
1585 1589 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1586 1590 or ctx1[f].cmp(ctx2[f])):
1587 1591 modified.append(f)
1588 1592 else:
1589 1593 fixup.append(f)
1590 1594
1591 1595 # update dirstate for files that are actually clean
1592 1596 if fixup:
1593 1597 if listclean:
1594 1598 clean += fixup
1595 1599
1596 1600 try:
1597 1601 # updating the dirstate is optional
1598 1602 # so we don't wait on the lock
1599 1603 wlock = self.wlock(False)
1600 1604 try:
1601 1605 for f in fixup:
1602 1606 self.dirstate.normal(f)
1603 1607 finally:
1604 1608 wlock.release()
1605 1609 except error.LockError:
1606 1610 pass
1607 1611
1608 1612 if not parentworking:
1609 1613 mf1 = mfmatches(ctx1)
1610 1614 if working:
1611 1615 # we are comparing working dir against non-parent
1612 1616 # generate a pseudo-manifest for the working dir
1613 1617 mf2 = mfmatches(self['.'])
1614 1618 for f in cmp + modified + added:
1615 1619 mf2[f] = None
1616 1620 mf2.set(f, ctx2.flags(f))
1617 1621 for f in removed:
1618 1622 if f in mf2:
1619 1623 del mf2[f]
1620 1624 else:
1621 1625 # we are comparing two revisions
1622 1626 deleted, unknown, ignored = [], [], []
1623 1627 mf2 = mfmatches(ctx2)
1624 1628
1625 1629 modified, added, clean = [], [], []
1626 1630 withflags = mf1.withflags() | mf2.withflags()
1627 1631 for fn in mf2:
1628 1632 if fn in mf1:
1629 1633 if (fn not in deleted and
1630 1634 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1631 1635 (mf1[fn] != mf2[fn] and
1632 1636 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1633 1637 modified.append(fn)
1634 1638 elif listclean:
1635 1639 clean.append(fn)
1636 1640 del mf1[fn]
1637 1641 elif fn not in deleted:
1638 1642 added.append(fn)
1639 1643 removed = mf1.keys()
1640 1644
1641 1645 if working and modified and not self.dirstate._checklink:
1642 1646 # Symlink placeholders may get non-symlink-like contents
1643 1647 # via user error or dereferencing by NFS or Samba servers,
1644 1648 # so we filter out any placeholders that don't look like a
1645 1649 # symlink
1646 1650 sane = []
1647 1651 for f in modified:
1648 1652 if ctx2.flags(f) == 'l':
1649 1653 d = ctx2[f].data()
1650 1654 if len(d) >= 1024 or '\n' in d or util.binary(d):
1651 1655 self.ui.debug('ignoring suspect symlink placeholder'
1652 1656 ' "%s"\n' % f)
1653 1657 continue
1654 1658 sane.append(f)
1655 1659 modified = sane
1656 1660
1657 1661 r = modified, added, removed, deleted, unknown, ignored, clean
1658 1662
1659 1663 if listsubrepos:
1660 1664 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1661 1665 if working:
1662 1666 rev2 = None
1663 1667 else:
1664 1668 rev2 = ctx2.substate[subpath][1]
1665 1669 try:
1666 1670 submatch = matchmod.narrowmatcher(subpath, match)
1667 1671 s = sub.status(rev2, match=submatch, ignored=listignored,
1668 1672 clean=listclean, unknown=listunknown,
1669 1673 listsubrepos=True)
1670 1674 for rfiles, sfiles in zip(r, s):
1671 1675 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1672 1676 except error.LookupError:
1673 1677 self.ui.status(_("skipping missing subrepository: %s\n")
1674 1678 % subpath)
1675 1679
1676 1680 for l in r:
1677 1681 l.sort()
1678 1682 return r
1679 1683
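# A small usage sketch (assumed, not part of this file): status() returns
# a 7-tuple whose members are conventionally unpacked by position.
def _example_summarize_status(repo):
    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(unknown=True, ignored=True, clean=True)
    return {'modified': len(modified), 'added': len(added),
            'removed': len(removed), 'unknown': len(unknown)}
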
1680 1684 def heads(self, start=None):
1681 1685 heads = self.changelog.heads(start)
1682 1686 # sort the output in rev descending order
1683 1687 return sorted(heads, key=self.changelog.rev, reverse=True)
1684 1688
1685 1689 def branchheads(self, branch=None, start=None, closed=False):
1686 1690 '''return a (possibly filtered) list of heads for the given branch
1687 1691
1688 1692 Heads are returned in topological order, from newest to oldest.
1689 1693 If branch is None, use the dirstate branch.
1690 1694 If start is not None, return only heads reachable from start.
1691 1695 If closed is True, return heads that are marked as closed as well.
1692 1696 '''
1693 1697 if branch is None:
1694 1698 branch = self[None].branch()
1695 1699 branches = self.branchmap()
1696 1700 if branch not in branches:
1697 1701 return []
1698 1702 # the cache returns heads ordered lowest to highest
1699 1703 bheads = list(reversed(branches[branch]))
1700 1704 if start is not None:
1701 1705 # filter out the heads that cannot be reached from startrev
1702 1706 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1703 1707 bheads = [h for h in bheads if h in fbheads]
1704 1708 if not closed:
1705 1709 bheads = [h for h in bheads if not self[h].closesbranch()]
1706 1710 return bheads
1707 1711
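# Hedged usage sketch: the open (non-closed) heads of the current dirstate
# branch, newest first, rendered as short hashes. Illustrative only.
def _example_open_heads(repo):
    return [short(n) for n in repo.branchheads()]
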
1708 1712 def branches(self, nodes):
1709 1713 if not nodes:
1710 1714 nodes = [self.changelog.tip()]
1711 1715 b = []
1712 1716 for n in nodes:
1713 1717 t = n
1714 1718 while True:
1715 1719 p = self.changelog.parents(n)
1716 1720 if p[1] != nullid or p[0] == nullid:
1717 1721 b.append((t, n, p[0], p[1]))
1718 1722 break
1719 1723 n = p[0]
1720 1724 return b
1721 1725
1722 1726 def between(self, pairs):
1723 1727 r = []
1724 1728
1725 1729 for top, bottom in pairs:
1726 1730 n, l, i = top, [], 0
1727 1731 f = 1
1728 1732
1729 1733 while n != bottom and n != nullid:
1730 1734 p = self.changelog.parents(n)[0]
1731 1735 if i == f:
1732 1736 l.append(n)
1733 1737 f = f * 2
1734 1738 n = p
1735 1739 i += 1
1736 1740
1737 1741 r.append(l)
1738 1742
1739 1743 return r
1740 1744
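# between() samples the first-parent chain at exponentially growing
# distances (1, 2, 4, 8, ...) from 'top'. The same spacing applied to a
# plain sequence, as a standalone illustration:
def _example_exponential_sample(chain):
    out, f, i = [], 1, 0
    for item in chain:
        if i == f:
            out.append(item)
            f *= 2
        i += 1
    return out

# _example_exponential_sample(range(20)) -> [1, 2, 4, 8, 16]
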
1741 1745 def pull(self, remote, heads=None, force=False):
1742 1746 # don't open a transaction for nothing or you break future useful
1743 1747 # rollback calls
1744 1748 tr = None
1745 1749 trname = 'pull\n' + util.hidepassword(remote.url())
1746 1750 lock = self.lock()
1747 1751 try:
1748 1752 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1749 1753 force=force)
1750 1754 common, fetch, rheads = tmp
1751 1755 if not fetch:
1752 1756 self.ui.status(_("no changes found\n"))
1753 1757 added = []
1754 1758 result = 0
1755 1759 else:
1756 1760 tr = self.transaction(trname)
1757 1761 if heads is None and list(common) == [nullid]:
1758 1762 self.ui.status(_("requesting all changes\n"))
1759 1763 elif heads is None and remote.capable('changegroupsubset'):
1760 1764 # issue1320, avoid a race if remote changed after discovery
1761 1765 heads = rheads
1762 1766
1763 1767 if remote.capable('getbundle'):
1764 1768 cg = remote.getbundle('pull', common=common,
1765 1769 heads=heads or rheads)
1766 1770 elif heads is None:
1767 1771 cg = remote.changegroup(fetch, 'pull')
1768 1772 elif not remote.capable('changegroupsubset'):
1769 1773 raise util.Abort(_("partial pull cannot be done because "
1770 1774 "other repository doesn't support "
1771 1775 "changegroupsubset."))
1772 1776 else:
1773 1777 cg = remote.changegroupsubset(fetch, heads, 'pull')
1774 1778 clstart = len(self.changelog)
1775 1779 result = self.addchangegroup(cg, 'pull', remote.url())
1776 1780 clend = len(self.changelog)
1777 1781 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1778 1782
1779 1783 # compute target subset
1780 1784 if heads is None:
1781 1785 # We pulled everything possible
1782 1786 # sync on everything common
1783 1787 subset = common + added
1784 1788 else:
1785 1789 # We pulled a specific subset
1786 1790 # sync on this subset
1787 1791 subset = heads
1788 1792
1789 1793 # Get phases data from remote
1790 1794 remotephases = remote.listkeys('phases')
1791 1795 publishing = bool(remotephases.get('publishing', False))
1792 1796 if remotephases and not publishing:
1793 1797 # remote is new and non-publishing
1794 1798 pheads, _dr = phases.analyzeremotephases(self, subset,
1795 1799 remotephases)
1796 1800 phases.advanceboundary(self, phases.public, pheads)
1797 1801 phases.advanceboundary(self, phases.draft, subset)
1798 1802 else:
1799 1803 # Remote is old or publishing; all common changesets
1800 1804 # should be seen as public
1801 1805 phases.advanceboundary(self, phases.public, subset)
1802 1806
1803 1807 if obsolete._enabled:
1803 1807 self.ui.debug('fetching remote obsolete markers\n')
1805 1809 remoteobs = remote.listkeys('obsolete')
1806 1810 if 'dump0' in remoteobs:
1807 1811 if tr is None:
1808 1812 tr = self.transaction(trname)
1809 1813 for key in sorted(remoteobs, reverse=True):
1810 1814 if key.startswith('dump'):
1811 1815 data = base85.b85decode(remoteobs[key])
1812 1816 self.obsstore.mergemarkers(tr, data)
1813 1817 if tr is not None:
1814 1818 tr.close()
1815 1819 finally:
1816 1820 if tr is not None:
1817 1821 tr.release()
1818 1822 lock.release()
1819 1823
1820 1824 return result
1821 1825
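# Hedged usage sketch (hypothetical helper): pull only the remote heads of
# one named branch by feeding the peer's branchmap() output into pull().
def _example_pull_branch(repo, remote, branch):
    heads = remote.branchmap().get(branch)
    if heads:
        return repo.pull(remote, heads=heads)
    return 0
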
1822 1826 def checkpush(self, force, revs):
1823 1827 """Extensions can override this function if additional checks have
1824 1828 to be performed before pushing, or call it if they override the
1825 1829 push command.
1826 1830 """
1827 1831 pass
1828 1832
1829 1833 def push(self, remote, force=False, revs=None, newbranch=False):
1830 1834 '''Push outgoing changesets (limited by revs) from the current
1831 1835 repository to remote. Return an integer:
1832 1836 - None means nothing to push
1833 1837 - 0 means HTTP error
1834 1838 - 1 means we pushed and remote head count is unchanged *or*
1835 1839 we have outgoing changesets but refused to push
1836 1840 - other values as described by addchangegroup()
1837 1841 '''
1838 1842 # there are two ways to push to remote repo:
1839 1843 #
1840 1844 # addchangegroup assumes local user can lock remote
1841 1845 # repo (local filesystem, old ssh servers).
1842 1846 #
1843 1847 # unbundle assumes local user cannot lock remote repo (new ssh
1844 1848 # servers, http servers).
1845 1849
1846 1850 if not remote.canpush():
1847 1851 raise util.Abort(_("destination does not support push"))
1848 1852 # get local lock as we might write phase data
1849 1853 locallock = self.lock()
1850 1854 try:
1851 1855 self.checkpush(force, revs)
1852 1856 lock = None
1853 1857 unbundle = remote.capable('unbundle')
1854 1858 if not unbundle:
1855 1859 lock = remote.lock()
1856 1860 try:
1857 1861 # discovery
1858 1862 fci = discovery.findcommonincoming
1859 1863 commoninc = fci(self, remote, force=force)
1860 1864 common, inc, remoteheads = commoninc
1861 1865 fco = discovery.findcommonoutgoing
1862 1866 outgoing = fco(self, remote, onlyheads=revs,
1863 1867 commoninc=commoninc, force=force)
1864 1868
1865 1869
1866 1870 if not outgoing.missing:
1867 1871 # nothing to push
1868 1872 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1869 1873 ret = None
1870 1874 else:
1871 1875 # something to push
1872 1876 if not force:
1873 1877 # if self.obsstore is empty there are no obsolete
1874 1878 # markers, so skip the iteration entirely
1875 1879 if self.obsstore:
1876 1880 # these messages are defined here for 80-char limit reasons
1877 1881 mso = _("push includes an obsolete changeset: %s!")
1878 1882 msu = _("push includes an unstable changeset: %s!")
1879 1883 # If there is at least one obsolete or unstable
1880 1884 # changeset in missing, at least one of the
1881 1885 # missing heads will be obsolete or unstable.
1882 1886 # So checking heads only is enough.
1883 1887 for node in outgoing.missingheads:
1884 1888 ctx = self[node]
1885 1889 if ctx.obsolete():
1886 1890 raise util.Abort(_(mso) % ctx)
1887 1891 elif ctx.unstable():
1888 1892 raise util.Abort(_(msu) % ctx)
1889 1893 discovery.checkheads(self, remote, outgoing,
1890 1894 remoteheads, newbranch,
1891 1895 bool(inc))
1892 1896
1893 1897 # create a changegroup from local
1894 1898 if revs is None and not outgoing.excluded:
1895 1899 # push everything,
1896 1900 # use the fast path, no race possible on push
1897 1901 cg = self._changegroup(outgoing.missing, 'push')
1898 1902 else:
1899 1903 cg = self.getlocalbundle('push', outgoing)
1900 1904
1901 1905 # apply changegroup to remote
1902 1906 if unbundle:
1903 1907 # local repo finds heads on server, finds out what
1904 1908 # revs it must push. Once revs are transferred, if the
1905 1909 # server finds it has different heads (someone else won
1906 1910 # the commit/push race), the server aborts.
1907 1911 if force:
1908 1912 remoteheads = ['force']
1909 1913 # ssh: return remote's addchangegroup()
1910 1914 # http: return remote's addchangegroup() or 0 for error
1911 1915 ret = remote.unbundle(cg, remoteheads, 'push')
1912 1916 else:
1913 1917 # we return an integer indicating remote head count
1914 1918 # change
1915 1919 ret = remote.addchangegroup(cg, 'push', self.url())
1916 1920
1917 1921 if ret:
1918 1922 # push succeeded, synchronize the target of the push
1919 1923 cheads = outgoing.missingheads
1920 1924 elif revs is None:
1921 1925 # All-out push failed; synchronize all common changesets
1922 1926 cheads = outgoing.commonheads
1923 1927 else:
1924 1928 # I want cheads = heads(::missingheads and ::commonheads)
1925 1929 # (missingheads is revs with secret changesets filtered out)
1926 1930 #
1927 1931 # This can be expressed as:
1928 1932 # cheads = ( (missingheads and ::commonheads)
1929 1933 # + (commonheads and ::missingheads)
1930 1934 # )
1931 1935 #
1932 1936 # while trying to push we already computed the following:
1933 1937 # common = (::commonheads)
1934 1938 # missing = ((commonheads::missingheads) - commonheads)
1935 1939 #
1936 1940 # We can pick:
1937 1941 # * missingheads part of common (::commonheads)
1938 1942 common = set(outgoing.common)
1939 1943 cheads = [node for node in revs if node in common]
1940 1944 # and
1941 1945 # * commonheads parents on missing
1942 1946 revset = self.set('%ln and parents(roots(%ln))',
1943 1947 outgoing.commonheads,
1944 1948 outgoing.missing)
1945 1949 cheads.extend(c.node() for c in revset)
1946 1950 # even when we don't push, exchanging phase data is useful
1947 1951 remotephases = remote.listkeys('phases')
1948 1952 if not remotephases: # old server or public only repo
1949 1953 phases.advanceboundary(self, phases.public, cheads)
1950 1954 # don't push any phase data as there is nothing to push
1951 1955 else:
1952 1956 ana = phases.analyzeremotephases(self, cheads, remotephases)
1953 1957 pheads, droots = ana
1954 1958 ### Apply remote phase on local
1955 1959 if remotephases.get('publishing', False):
1956 1960 phases.advanceboundary(self, phases.public, cheads)
1957 1961 else: # publish = False
1958 1962 phases.advanceboundary(self, phases.public, pheads)
1959 1963 phases.advanceboundary(self, phases.draft, cheads)
1960 1964 ### Apply local phase on remote
1961 1965
1962 1966 # Get the list of all revs draft on remote but public here.
1963 1967 # XXX Beware that the revset breaks if droots is not strictly a
1964 1968 # XXX set of roots; we may want to ensure it is, but that is costly
1965 1969 outdated = self.set('heads((%ln::%ln) and public())',
1966 1970 droots, cheads)
1967 1971 for newremotehead in outdated:
1968 1972 r = remote.pushkey('phases',
1969 1973 newremotehead.hex(),
1970 1974 str(phases.draft),
1971 1975 str(phases.public))
1972 1976 if not r:
1973 1977 self.ui.warn(_('updating %s to public failed!\n')
1974 1978 % newremotehead)
1975 1979 self.ui.debug('try to push obsolete markers to remote\n')
1976 1980 if (obsolete._enabled and self.obsstore and
1977 1981 'obsolete' in remote.listkeys('namespaces')):
1978 1982 rslts = []
1979 1983 remotedata = self.listkeys('obsolete')
1980 1984 for key in sorted(remotedata, reverse=True):
1981 1985 # reverse sort to ensure we end with dump0
1982 1986 data = remotedata[key]
1983 1987 rslts.append(remote.pushkey('obsolete', key, '', data))
1984 1988 if [r for r in rslts if not r]:
1985 1989 msg = _('failed to push some obsolete markers!\n')
1986 1990 self.ui.warn(msg)
1987 1991 finally:
1988 1992 if lock is not None:
1989 1993 lock.release()
1990 1994 finally:
1991 1995 locallock.release()
1992 1996
1993 1997 self.ui.debug("checking for updated bookmarks\n")
1994 1998 rb = remote.listkeys('bookmarks')
1995 1999 for k in rb.keys():
1996 2000 if k in self._bookmarks:
1997 2001 nr, nl = rb[k], hex(self._bookmarks[k])
1998 2002 if nr in self:
1999 2003 cr = self[nr]
2000 2004 cl = self[nl]
2001 2005 if bookmarks.validdest(self, cr, cl):
2002 2006 r = remote.pushkey('bookmarks', k, nr, nl)
2003 2007 if r:
2004 2008 self.ui.status(_("updating bookmark %s\n") % k)
2005 2009 else:
2006 2010 self.ui.warn(_('updating bookmark %s'
2007 2011 ' failed!\n') % k)
2008 2012
2009 2013 return ret
2010 2014
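# Sketch (assumed helper, not in this file): mapping push()'s return value
# onto the cases listed in its docstring.
def _example_describe_push(ret):
    if ret is None:
        return 'nothing to push'
    if ret == 0:
        return 'HTTP error'
    if ret == 1:
        return 'pushed; remote head count unchanged (or push refused)'
    return 'pushed; head-count change encoded as in addchangegroup()'
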
2011 2015 def changegroupinfo(self, nodes, source):
2012 2016 if self.ui.verbose or source == 'bundle':
2013 2017 self.ui.status(_("%d changesets found\n") % len(nodes))
2014 2018 if self.ui.debugflag:
2015 2019 self.ui.debug("list of changesets:\n")
2016 2020 for node in nodes:
2017 2021 self.ui.debug("%s\n" % hex(node))
2018 2022
2019 2023 def changegroupsubset(self, bases, heads, source):
2020 2024 """Compute a changegroup consisting of all the nodes that are
2021 2025 descendants of any of the bases and ancestors of any of the heads.
2022 2026 Return a chunkbuffer object whose read() method will return
2023 2027 successive changegroup chunks.
2024 2028
2025 2029 It is fairly complex as determining which filenodes and which
2026 2030 manifest nodes need to be included for the changeset to be complete
2027 2031 is non-trivial.
2028 2032
2029 2033 Another wrinkle is doing the reverse, figuring out which changeset in
2030 2034 the changegroup a particular filenode or manifestnode belongs to.
2031 2035 """
2032 2036 cl = self.changelog
2033 2037 if not bases:
2034 2038 bases = [nullid]
2035 2039 csets, bases, heads = cl.nodesbetween(bases, heads)
2036 2040 # We assume that all ancestors of bases are known
2037 2041 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2038 2042 return self._changegroupsubset(common, csets, heads, source)
2039 2043
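# A hedged restatement of the node selection above using plain revision
# sets: descendants of any base intersected with ancestors of any head.
# 'descendants' and 'ancestors' are illustrative callables, not real APIs.
def _example_subset(descendants, ancestors, bases, heads):
    want = set()
    for b in bases:
        want |= descendants(b)
    keep = set()
    for h in heads:
        keep |= ancestors(h)
    return sorted(want & keep)
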
2040 2044 def getlocalbundle(self, source, outgoing):
2041 2045 """Like getbundle, but taking a discovery.outgoing as an argument.
2042 2046
2043 2047 This is only implemented for local repos and reuses potentially
2044 2048 precomputed sets in outgoing."""
2045 2049 if not outgoing.missing:
2046 2050 return None
2047 2051 return self._changegroupsubset(outgoing.common,
2048 2052 outgoing.missing,
2049 2053 outgoing.missingheads,
2050 2054 source)
2051 2055
2052 2056 def getbundle(self, source, heads=None, common=None):
2053 2057 """Like changegroupsubset, but returns the set difference between the
2054 2058 ancestors of heads and the ancestors common.
2055 2059
2056 2060 If heads is None, use the local heads. If common is None, use [nullid].
2057 2061
2058 2062 The nodes in common might not all be known locally due to the way the
2059 2063 current discovery protocol works.
2060 2064 """
2061 2065 cl = self.changelog
2062 2066 if common:
2063 2067 nm = cl.nodemap
2064 2068 common = [n for n in common if n in nm]
2065 2069 else:
2066 2070 common = [nullid]
2067 2071 if not heads:
2068 2072 heads = cl.heads()
2069 2073 return self.getlocalbundle(source,
2070 2074 discovery.outgoing(cl, common, heads))
2071 2075
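# The bundle carries ancestors(heads) - ancestors(common). A toy
# illustration over plain sets ('ancestors' is a hypothetical callable):
def _example_bundle_revs(ancestors, heads, common):
    want = set().union(*[ancestors(h) for h in heads])
    have = set().union(*[ancestors(c) for c in common])
    return sorted(want - have)
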
2072 2076 def _changegroupsubset(self, commonrevs, csets, heads, source):
2073 2077
2074 2078 cl = self.changelog
2075 2079 mf = self.manifest
2076 2080 mfs = {} # needed manifests
2077 2081 fnodes = {} # needed file nodes
2078 2082 changedfiles = set()
2079 2083 fstate = ['', {}]
2080 2084 count = [0, 0]
2081 2085
2082 2086 # can we go through the fast path?
2083 2087 heads.sort()
2084 2088 if heads == sorted(self.heads()):
2085 2089 return self._changegroup(csets, source)
2086 2090
2087 2091 # slow path
2088 2092 self.hook('preoutgoing', throw=True, source=source)
2089 2093 self.changegroupinfo(csets, source)
2090 2094
2091 2095 # filter any nodes that claim to be part of the known set
2092 2096 def prune(revlog, missing):
2093 2097 rr, rl = revlog.rev, revlog.linkrev
2094 2098 return [n for n in missing
2095 2099 if rl(rr(n)) not in commonrevs]
2096 2100
2097 2101 progress = self.ui.progress
2098 2102 _bundling = _('bundling')
2099 2103 _changesets = _('changesets')
2100 2104 _manifests = _('manifests')
2101 2105 _files = _('files')
2102 2106
2103 2107 def lookup(revlog, x):
2104 2108 if revlog == cl:
2105 2109 c = cl.read(x)
2106 2110 changedfiles.update(c[3])
2107 2111 mfs.setdefault(c[0], x)
2108 2112 count[0] += 1
2109 2113 progress(_bundling, count[0],
2110 2114 unit=_changesets, total=count[1])
2111 2115 return x
2112 2116 elif revlog == mf:
2113 2117 clnode = mfs[x]
2114 2118 mdata = mf.readfast(x)
2115 2119 for f, n in mdata.iteritems():
2116 2120 if f in changedfiles:
2117 2121 fnodes[f].setdefault(n, clnode)
2118 2122 count[0] += 1
2119 2123 progress(_bundling, count[0],
2120 2124 unit=_manifests, total=count[1])
2121 2125 return clnode
2122 2126 else:
2123 2127 progress(_bundling, count[0], item=fstate[0],
2124 2128 unit=_files, total=count[1])
2125 2129 return fstate[1][x]
2126 2130
2127 2131 bundler = changegroup.bundle10(lookup)
2128 2132 reorder = self.ui.config('bundle', 'reorder', 'auto')
2129 2133 if reorder == 'auto':
2130 2134 reorder = None
2131 2135 else:
2132 2136 reorder = util.parsebool(reorder)
2133 2137
2134 2138 def gengroup():
2135 2139 # Create a changenode group generator that will call our functions
2136 2140 # back to lookup the owning changenode and collect information.
2137 2141 count[:] = [0, len(csets)]
2138 2142 for chunk in cl.group(csets, bundler, reorder=reorder):
2139 2143 yield chunk
2140 2144 progress(_bundling, None)
2141 2145
2142 2146 # Create a generator for the manifestnodes that calls our lookup
2143 2147 # and data collection functions back.
2144 2148 for f in changedfiles:
2145 2149 fnodes[f] = {}
2146 2150 count[:] = [0, len(mfs)]
2147 2151 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2148 2152 yield chunk
2149 2153 progress(_bundling, None)
2150 2154
2151 2155 mfs.clear()
2152 2156
2153 2157 # Go through all our files in order sorted by name.
2154 2158 count[:] = [0, len(changedfiles)]
2155 2159 for fname in sorted(changedfiles):
2156 2160 filerevlog = self.file(fname)
2157 2161 if not len(filerevlog):
2158 2162 raise util.Abort(_("empty or missing revlog for %s")
2159 2163 % fname)
2160 2164 fstate[0] = fname
2161 2165 fstate[1] = fnodes.pop(fname, {})
2162 2166
2163 2167 nodelist = prune(filerevlog, fstate[1])
2164 2168 if nodelist:
2165 2169 count[0] += 1
2166 2170 yield bundler.fileheader(fname)
2167 2171 for chunk in filerevlog.group(nodelist, bundler, reorder):
2168 2172 yield chunk
2169 2173
2170 2174 # Signal that no more groups are left.
2171 2175 yield bundler.close()
2172 2176 progress(_bundling, None)
2173 2177
2174 2178 if csets:
2175 2179 self.hook('outgoing', node=hex(csets[0]), source=source)
2176 2180
2177 2181 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2178 2182
2179 2183 def changegroup(self, basenodes, source):
2180 2184 # to avoid a race we use changegroupsubset() (issue1320)
2181 2185 return self.changegroupsubset(basenodes, self.heads(), source)
2182 2186
2183 2187 def _changegroup(self, nodes, source):
2184 2188 """Compute the changegroup of all nodes that we have that a recipient
2185 2189 doesn't. Return a chunkbuffer object whose read() method will return
2186 2190 successive changegroup chunks.
2187 2191
2188 2192 This is much easier than the previous function as we can assume that
2189 2193 the recipient has any changenode we aren't sending them.
2190 2194
2191 2195 nodes is the set of nodes to send"""
2192 2196
2193 2197 cl = self.changelog
2194 2198 mf = self.manifest
2195 2199 mfs = {}
2196 2200 changedfiles = set()
2197 2201 fstate = ['']
2198 2202 count = [0, 0]
2199 2203
2200 2204 self.hook('preoutgoing', throw=True, source=source)
2201 2205 self.changegroupinfo(nodes, source)
2202 2206
2203 2207 revset = set([cl.rev(n) for n in nodes])
2204 2208
2205 2209 def gennodelst(log):
2206 2210 ln, llr = log.node, log.linkrev
2207 2211 return [ln(r) for r in log if llr(r) in revset]
2208 2212
2209 2213 progress = self.ui.progress
2210 2214 _bundling = _('bundling')
2211 2215 _changesets = _('changesets')
2212 2216 _manifests = _('manifests')
2213 2217 _files = _('files')
2214 2218
2215 2219 def lookup(revlog, x):
2216 2220 if revlog == cl:
2217 2221 c = cl.read(x)
2218 2222 changedfiles.update(c[3])
2219 2223 mfs.setdefault(c[0], x)
2220 2224 count[0] += 1
2221 2225 progress(_bundling, count[0],
2222 2226 unit=_changesets, total=count[1])
2223 2227 return x
2224 2228 elif revlog == mf:
2225 2229 count[0] += 1
2226 2230 progress(_bundling, count[0],
2227 2231 unit=_manifests, total=count[1])
2228 2232 return cl.node(revlog.linkrev(revlog.rev(x)))
2229 2233 else:
2230 2234 progress(_bundling, count[0], item=fstate[0],
2231 2235 total=count[1], unit=_files)
2232 2236 return cl.node(revlog.linkrev(revlog.rev(x)))
2233 2237
2234 2238 bundler = changegroup.bundle10(lookup)
2235 2239 reorder = self.ui.config('bundle', 'reorder', 'auto')
2236 2240 if reorder == 'auto':
2237 2241 reorder = None
2238 2242 else:
2239 2243 reorder = util.parsebool(reorder)
2240 2244
2241 2245 def gengroup():
2242 2246 '''yield a sequence of changegroup chunks (strings)'''
2243 2247 # construct a list of all changed files
2244 2248
2245 2249 count[:] = [0, len(nodes)]
2246 2250 for chunk in cl.group(nodes, bundler, reorder=reorder):
2247 2251 yield chunk
2248 2252 progress(_bundling, None)
2249 2253
2250 2254 count[:] = [0, len(mfs)]
2251 2255 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2252 2256 yield chunk
2253 2257 progress(_bundling, None)
2254 2258
2255 2259 count[:] = [0, len(changedfiles)]
2256 2260 for fname in sorted(changedfiles):
2257 2261 filerevlog = self.file(fname)
2258 2262 if not len(filerevlog):
2259 2263 raise util.Abort(_("empty or missing revlog for %s")
2260 2264 % fname)
2261 2265 fstate[0] = fname
2262 2266 nodelist = gennodelst(filerevlog)
2263 2267 if nodelist:
2264 2268 count[0] += 1
2265 2269 yield bundler.fileheader(fname)
2266 2270 for chunk in filerevlog.group(nodelist, bundler, reorder):
2267 2271 yield chunk
2268 2272 yield bundler.close()
2269 2273 progress(_bundling, None)
2270 2274
2271 2275 if nodes:
2272 2276 self.hook('outgoing', node=hex(nodes[0]), source=source)
2273 2277
2274 2278 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2275 2279
2276 2280 def addchangegroup(self, source, srctype, url, emptyok=False):
2277 2281 """Add the changegroup returned by source.read() to this repo.
2278 2282 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2279 2283 the URL of the repo where this changegroup is coming from.
2280 2284
2281 2285 Return an integer summarizing the change to this repo:
2282 2286 - nothing changed or no source: 0
2283 2287 - more heads than before: 1+added heads (2..n)
2284 2288 - fewer heads than before: -1-removed heads (-2..-n)
2285 2289 - number of heads stays the same: 1
2286 2290 """
2287 2291 def csmap(x):
2288 2292 self.ui.debug("add changeset %s\n" % short(x))
2289 2293 return len(cl)
2290 2294
2291 2295 def revmap(x):
2292 2296 return cl.rev(x)
2293 2297
2294 2298 if not source:
2295 2299 return 0
2296 2300
2297 2301 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2298 2302
2299 2303 changesets = files = revisions = 0
2300 2304 efiles = set()
2301 2305
2302 2306 # write changelog data to temp files so concurrent readers will not
2303 2307 # see an inconsistent view
2304 2308 cl = self.changelog
2305 2309 cl.delayupdate()
2306 2310 oldheads = cl.heads()
2307 2311
2308 2312 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2309 2313 try:
2310 2314 trp = weakref.proxy(tr)
2311 2315 # pull off the changeset group
2312 2316 self.ui.status(_("adding changesets\n"))
2313 2317 clstart = len(cl)
2314 2318 class prog(object):
2315 2319 step = _('changesets')
2316 2320 count = 1
2317 2321 ui = self.ui
2318 2322 total = None
2319 2323 def __call__(self):
2320 2324 self.ui.progress(self.step, self.count, unit=_('chunks'),
2321 2325 total=self.total)
2322 2326 self.count += 1
2323 2327 pr = prog()
2324 2328 source.callback = pr
2325 2329
2326 2330 source.changelogheader()
2327 2331 srccontent = cl.addgroup(source, csmap, trp)
2328 2332 if not (srccontent or emptyok):
2329 2333 raise util.Abort(_("received changelog group is empty"))
2330 2334 clend = len(cl)
2331 2335 changesets = clend - clstart
2332 2336 for c in xrange(clstart, clend):
2333 2337 efiles.update(self[c].files())
2334 2338 efiles = len(efiles)
2335 2339 self.ui.progress(_('changesets'), None)
2336 2340
2337 2341 # pull off the manifest group
2338 2342 self.ui.status(_("adding manifests\n"))
2339 2343 pr.step = _('manifests')
2340 2344 pr.count = 1
2341 2345 pr.total = changesets # manifests <= changesets
2342 2346 # no need to check for empty manifest group here:
2343 2347 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2344 2348 # no new manifest will be created and the manifest group will
2345 2349 # be empty during the pull
2346 2350 source.manifestheader()
2347 2351 self.manifest.addgroup(source, revmap, trp)
2348 2352 self.ui.progress(_('manifests'), None)
2349 2353
2350 2354 needfiles = {}
2351 2355 if self.ui.configbool('server', 'validate', default=False):
2352 2356 # validate incoming csets have their manifests
2353 2357 for cset in xrange(clstart, clend):
2354 2358 mfest = self.changelog.read(self.changelog.node(cset))[0]
2355 2359 mfest = self.manifest.readdelta(mfest)
2356 2360 # store file nodes we must see
2357 2361 for f, n in mfest.iteritems():
2358 2362 needfiles.setdefault(f, set()).add(n)
2359 2363
2360 2364 # process the files
2361 2365 self.ui.status(_("adding file changes\n"))
2362 2366 pr.step = _('files')
2363 2367 pr.count = 1
2364 2368 pr.total = efiles
2365 2369 source.callback = None
2366 2370
2367 2371 while True:
2368 2372 chunkdata = source.filelogheader()
2369 2373 if not chunkdata:
2370 2374 break
2371 2375 f = chunkdata["filename"]
2372 2376 self.ui.debug("adding %s revisions\n" % f)
2373 2377 pr()
2374 2378 fl = self.file(f)
2375 2379 o = len(fl)
2376 2380 if not fl.addgroup(source, revmap, trp):
2377 2381 raise util.Abort(_("received file revlog group is empty"))
2378 2382 revisions += len(fl) - o
2379 2383 files += 1
2380 2384 if f in needfiles:
2381 2385 needs = needfiles[f]
2382 2386 for new in xrange(o, len(fl)):
2383 2387 n = fl.node(new)
2384 2388 if n in needs:
2385 2389 needs.remove(n)
2386 2390 if not needs:
2387 2391 del needfiles[f]
2388 2392 self.ui.progress(_('files'), None)
2389 2393
2390 2394 for f, needs in needfiles.iteritems():
2391 2395 fl = self.file(f)
2392 2396 for n in needs:
2393 2397 try:
2394 2398 fl.rev(n)
2395 2399 except error.LookupError:
2396 2400 raise util.Abort(
2397 2401 _('missing file data for %s:%s - run hg verify') %
2398 2402 (f, hex(n)))
2399 2403
2400 2404 dh = 0
2401 2405 if oldheads:
2402 2406 heads = cl.heads()
2403 2407 dh = len(heads) - len(oldheads)
2404 2408 for h in heads:
2405 2409 if h not in oldheads and self[h].closesbranch():
2406 2410 dh -= 1
2407 2411 htext = ""
2408 2412 if dh:
2409 2413 htext = _(" (%+d heads)") % dh
2410 2414
2411 2415 self.ui.status(_("added %d changesets"
2412 2416 " with %d changes to %d files%s\n")
2413 2417 % (changesets, revisions, files, htext))
2414 2418 obsolete.clearobscaches(self)
2415 2419
2416 2420 if changesets > 0:
2417 2421 p = lambda: cl.writepending() and self.root or ""
2418 2422 self.hook('pretxnchangegroup', throw=True,
2419 2423 node=hex(cl.node(clstart)), source=srctype,
2420 2424 url=url, pending=p)
2421 2425
2422 2426 added = [cl.node(r) for r in xrange(clstart, clend)]
2423 2427 publishing = self.ui.configbool('phases', 'publish', True)
2424 2428 if srctype == 'push':
2425 2429 # Old servers can not push the boundary themselves.
2426 2430 # New servers won't push the boundary if the changeset already
2427 2431 # existed locally as secret.
2428 2432 #
2429 2433 # We should not use added here but the list of all changes in
2430 2434 # the bundle.
2431 2435 if publishing:
2432 2436 phases.advanceboundary(self, phases.public, srccontent)
2433 2437 else:
2434 2438 phases.advanceboundary(self, phases.draft, srccontent)
2435 2439 phases.retractboundary(self, phases.draft, added)
2436 2440 elif srctype != 'strip':
2437 2441 # publishing only alters behavior during push
2438 2442 #
2439 2443 # strip should not touch boundary at all
2440 2444 phases.retractboundary(self, phases.draft, added)
2441 2445
2442 2446 # make changelog see real files again
2443 2447 cl.finalize(trp)
2444 2448
2445 2449 tr.close()
2446 2450
2447 2451 if changesets > 0:
2448 2452 self.updatebranchcache()
2449 2453 def runhooks():
2450 2454 # forcefully update the on-disk branch cache
2451 2455 self.ui.debug("updating the branch cache\n")
2452 2456 self.hook("changegroup", node=hex(cl.node(clstart)),
2453 2457 source=srctype, url=url)
2454 2458
2455 2459 for n in added:
2456 2460 self.hook("incoming", node=hex(n), source=srctype,
2457 2461 url=url)
2458 2462 self._afterlock(runhooks)
2459 2463
2460 2464 finally:
2461 2465 tr.release()
2462 2466 # never return 0 here:
2463 2467 if dh < 0:
2464 2468 return dh - 1
2465 2469 else:
2466 2470 return dh + 1
2467 2471
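# addchangegroup() never returns 0; a sketch of decoding its head-count
# delta (assumed helper): +n heads -> n+1, -n heads -> -n-1, unchanged -> 1.
def _example_decode_headchange(ret):
    if ret > 0:
        return ret - 1  # heads added (0 means head count unchanged)
    return ret + 1      # negative: heads removed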
2468 2472 def stream_in(self, remote, requirements):
2469 2473 lock = self.lock()
2470 2474 try:
2471 2475 fp = remote.stream_out()
2472 2476 l = fp.readline()
2473 2477 try:
2474 2478 resp = int(l)
2475 2479 except ValueError:
2476 2480 raise error.ResponseError(
2477 2481 _('unexpected response from remote server:'), l)
2478 2482 if resp == 1:
2479 2483 raise util.Abort(_('operation forbidden by server'))
2480 2484 elif resp == 2:
2481 2485 raise util.Abort(_('locking the remote repository failed'))
2482 2486 elif resp != 0:
2483 2487 raise util.Abort(_('the server sent an unknown error code'))
2484 2488 self.ui.status(_('streaming all changes\n'))
2485 2489 l = fp.readline()
2486 2490 try:
2487 2491 total_files, total_bytes = map(int, l.split(' ', 1))
2488 2492 except (ValueError, TypeError):
2489 2493 raise error.ResponseError(
2490 2494 _('unexpected response from remote server:'), l)
2491 2495 self.ui.status(_('%d files to transfer, %s of data\n') %
2492 2496 (total_files, util.bytecount(total_bytes)))
2493 2497 handled_bytes = 0
2494 2498 self.ui.progress(_('clone'), 0, total=total_bytes)
2495 2499 start = time.time()
2496 2500 for i in xrange(total_files):
2497 2501 # XXX doesn't support '\n' or '\r' in filenames
2498 2502 l = fp.readline()
2499 2503 try:
2500 2504 name, size = l.split('\0', 1)
2501 2505 size = int(size)
2502 2506 except (ValueError, TypeError):
2503 2507 raise error.ResponseError(
2504 2508 _('unexpected response from remote server:'), l)
2505 2509 if self.ui.debugflag:
2506 2510 self.ui.debug('adding %s (%s)\n' %
2507 2511 (name, util.bytecount(size)))
2508 2512 # for backwards compat, name was partially encoded
2509 2513 ofp = self.sopener(store.decodedir(name), 'w')
2510 2514 for chunk in util.filechunkiter(fp, limit=size):
2511 2515 handled_bytes += len(chunk)
2512 2516 self.ui.progress(_('clone'), handled_bytes,
2513 2517 total=total_bytes)
2514 2518 ofp.write(chunk)
2515 2519 ofp.close()
2516 2520 elapsed = time.time() - start
2517 2521 if elapsed <= 0:
2518 2522 elapsed = 0.001
2519 2523 self.ui.progress(_('clone'), None)
2520 2524 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2521 2525 (util.bytecount(total_bytes), elapsed,
2522 2526 util.bytecount(total_bytes / elapsed)))
2523 2527
2524 2528 # new requirements = old non-format requirements +
2525 2529 # new format-related requirements
2526 2530 # from the streamed-in repository
2527 2531 requirements.update(set(self.requirements) - self.supportedformats)
2528 2532 self._applyrequirements(requirements)
2529 2533 self._writerequirements()
2530 2534
2531 2535 self.invalidate()
2532 2536 return len(self.heads()) + 1
2533 2537 finally:
2534 2538 lock.release()
2535 2539
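# The stream_out wire format consumed above, as a hedged summary: one
# status line, one '<files> <bytes>' line, then for each file a
# '<name>\0<size>' header followed by exactly <size> bytes of data.
# A minimal header reader:
def _example_read_stream_header(fp):
    resp = int(fp.readline())  # 0 indicates success
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    return resp, total_files, total_bytes
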
2536 2540 def clone(self, remote, heads=[], stream=False):
2537 2541 '''clone remote repository.
2538 2542
2539 2543 keyword arguments:
2540 2544 heads: list of revs to clone (forces use of pull)
2541 2545 stream: use streaming clone if possible'''
2542 2546
2543 2547 # now, all clients that can request uncompressed clones can
2544 2548 # read repo formats supported by all servers that can serve
2545 2549 # them.
2546 2550
2547 2551 # if revlog format changes, client will have to check version
2548 2552 # and format flags on "stream" capability, and use
2549 2553 # uncompressed only if compatible.
2550 2554
2551 2555 if not stream:
2552 2556 # if the server explicitly prefers to stream (for fast LANs)
2553 2557 stream = remote.capable('stream-preferred')
2554 2558
2555 2559 if stream and not heads:
2556 2560 # 'stream' means remote revlog format is revlogv1 only
2557 2561 if remote.capable('stream'):
2558 2562 return self.stream_in(remote, set(('revlogv1',)))
2559 2563 # otherwise, 'streamreqs' contains the remote revlog format
2560 2564 streamreqs = remote.capable('streamreqs')
2561 2565 if streamreqs:
2562 2566 streamreqs = set(streamreqs.split(','))
2563 2567 # if we support it, stream in and adjust our requirements
2564 2568 if not streamreqs - self.supportedformats:
2565 2569 return self.stream_in(remote, streamreqs)
2566 2570 return self.pull(remote, heads)
2567 2571
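# The requirement negotiation above in a nutshell: stream in only when
# every format the remote needs is one we support. Toy restatement:
def _example_can_stream(streamreqs, supportedformats):
    return not set(streamreqs) - set(supportedformats)
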
2568 2572 def pushkey(self, namespace, key, old, new):
2569 2573 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2570 2574 old=old, new=new)
2571 2575 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2572 2576 ret = pushkey.push(self, namespace, key, old, new)
2573 2577 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2574 2578 ret=ret)
2575 2579 return ret
2576 2580
2577 2581 def listkeys(self, namespace):
2578 2582 self.hook('prelistkeys', throw=True, namespace=namespace)
2579 2583 self.ui.debug('listing keys for "%s"\n' % namespace)
2580 2584 values = pushkey.list(self, namespace)
2581 2585 self.hook('listkeys', namespace=namespace, values=values)
2582 2586 return values
2583 2587
2584 2588 def debugwireargs(self, one, two, three=None, four=None, five=None):
2585 2589 '''used to test argument passing over the wire'''
2586 2590 return "%s %s %s %s %s" % (one, two, three, four, five)
2587 2591
2588 2592 def savecommitmessage(self, text):
2589 2593 fp = self.opener('last-message.txt', 'wb')
2590 2594 try:
2591 2595 fp.write(text)
2592 2596 finally:
2593 2597 fp.close()
2594 2598 return self.pathto(fp.name[len(self.root)+1:])
2595 2599
2596 2600 # used to avoid circular references so destructors work
2597 2601 def aftertrans(files):
2598 2602 renamefiles = [tuple(t) for t in files]
2599 2603 def a():
2600 2604 for src, dest in renamefiles:
2601 2605 try:
2602 2606 util.rename(src, dest)
2603 2607 except OSError: # journal file does not yet exist
2604 2608 pass
2605 2609 return a
2606 2610
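# Hedged usage sketch: the closure built by aftertrans() is handed to a
# transaction as its post-close action; calling it performs the renames.
# The file names below are illustrative only.
def _example_journal_cleanup():
    files = [('journal', 'undo'), ('journal.bookmarks', 'undo.bookmarks')]
    cleanup = aftertrans(files)
    cleanup()  # renames each journal file to its undo counterpart
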
2607 2611 def undoname(fn):
2608 2612 base, name = os.path.split(fn)
2609 2613 assert name.startswith('journal')
2610 2614 return os.path.join(base, name.replace('journal', 'undo', 1))
2611 2615
2612 2616 def instance(ui, path, create):
2613 2617 return localrepository(ui, util.urllocalpath(path), create)
2614 2618
2615 2619 def islocal(path):
2616 2620 return True