clfilter: ensure cache invalidation is done on the main unfiltered repo...
Pierre-Yves David
r17997:6089956e default
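The heart of the change is that cache invalidation must always target the unfiltered repository: the filecache'd properties (bookmarks, phase roots, changelog, dirstate, ...) are stored on that object, so a filtered view that deleted attributes from itself would leave the real cached values untouched. A minimal sketch of the pattern, using toy stand-ins rather than Mercurial's actual classes:

class mainrepo(object):
    """toy stand-in for the unfiltered localrepository"""
    def __init__(self):
        self._filecache = {'bookmarks': object(), 'phaseroots': object()}

    def unfiltered(self):
        return self                       # the main repo holds the caches

    def invalidate(self):
        unfiltered = self.unfiltered()    # same idea as the change below
        unfiltered._filecache.clear()

class filteredview(mainrepo):
    """toy stand-in for a filtered repo view"""
    def __init__(self, base):
        self._base = base

    def unfiltered(self):
        return self._base                 # delegate to the main repo

repo = mainrepo()
view = filteredview(repo)
view.invalidate()                         # clears the caches on repo, not on the view
assert not repo._filecache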
@@ -1,2646 +1,2648 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 def unfilteredmeth(orig):
27 27 """decorate method that always need to be run on unfiltered version"""
28 28 def wrapper(repo, *args, **kwargs):
29 29 return orig(repo.unfiltered(), *args, **kwargs)
30 30 return wrapper
31 31
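`unfilteredmeth` is reused with this change on `destroyed()` further down (and already guards the branch cache helpers below). A self-contained sketch of what the decorator does, with the decorator body copied from above and a hypothetical toy class:

def unfilteredmeth(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

class toyrepo(object):
    def unfiltered(self):
        return self

    @unfilteredmeth
    def whereami(self):
        # 'self' here is whatever unfiltered() returned
        return self

t = toyrepo()
assert t.whereami() is t.unfiltered()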
32 32 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
33 33 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
34 34
35 35 class localpeer(peer.peerrepository):
36 36 '''peer for a local repo; reflects only the most recent API'''
37 37
38 38 def __init__(self, repo, caps=MODERNCAPS):
39 39 peer.peerrepository.__init__(self)
40 40 self._repo = repo
41 41 self.ui = repo.ui
42 42 self._caps = repo._restrictcapabilities(caps)
43 43 self.requirements = repo.requirements
44 44 self.supportedformats = repo.supportedformats
45 45
46 46 def close(self):
47 47 self._repo.close()
48 48
49 49 def _capabilities(self):
50 50 return self._caps
51 51
52 52 def local(self):
53 53 return self._repo
54 54
55 55 def canpush(self):
56 56 return True
57 57
58 58 def url(self):
59 59 return self._repo.url()
60 60
61 61 def lookup(self, key):
62 62 return self._repo.lookup(key)
63 63
64 64 def branchmap(self):
65 65 return discovery.visiblebranchmap(self._repo)
66 66
67 67 def heads(self):
68 68 return discovery.visibleheads(self._repo)
69 69
70 70 def known(self, nodes):
71 71 return self._repo.known(nodes)
72 72
73 73 def getbundle(self, source, heads=None, common=None):
74 74 return self._repo.getbundle(source, heads=heads, common=common)
75 75
76 76 # TODO We might want to move the next two calls into legacypeer and add
77 77 # unbundle instead.
78 78
79 79 def lock(self):
80 80 return self._repo.lock()
81 81
82 82 def addchangegroup(self, cg, source, url):
83 83 return self._repo.addchangegroup(cg, source, url)
84 84
85 85 def pushkey(self, namespace, key, old, new):
86 86 return self._repo.pushkey(namespace, key, old, new)
87 87
88 88 def listkeys(self, namespace):
89 89 return self._repo.listkeys(namespace)
90 90
91 91 def debugwireargs(self, one, two, three=None, four=None, five=None):
92 92 '''used to test argument passing over the wire'''
93 93 return "%s %s %s %s %s" % (one, two, three, four, five)
94 94
95 95 class locallegacypeer(localpeer):
96 96 '''peer extension which implements legacy methods too; used for tests with
97 97 restricted capabilities'''
98 98
99 99 def __init__(self, repo):
100 100 localpeer.__init__(self, repo, caps=LEGACYCAPS)
101 101
102 102 def branches(self, nodes):
103 103 return self._repo.branches(nodes)
104 104
105 105 def between(self, pairs):
106 106 return self._repo.between(pairs)
107 107
108 108 def changegroup(self, basenodes, source):
109 109 return self._repo.changegroup(basenodes, source)
110 110
111 111 def changegroupsubset(self, bases, heads, source):
112 112 return self._repo.changegroupsubset(bases, heads, source)
113 113
114 114 class localrepository(object):
115 115
116 116 supportedformats = set(('revlogv1', 'generaldelta'))
117 117 supported = supportedformats | set(('store', 'fncache', 'shared',
118 118 'dotencode'))
119 119 openerreqs = set(('revlogv1', 'generaldelta'))
120 120 requirements = ['revlogv1']
121 121
122 122 def _baserequirements(self, create):
123 123 return self.requirements[:]
124 124
125 125 def __init__(self, baseui, path=None, create=False):
126 126 self.wvfs = scmutil.vfs(path, expand=True)
127 127 self.wopener = self.wvfs
128 128 self.root = self.wvfs.base
129 129 self.path = self.wvfs.join(".hg")
130 130 self.origroot = path
131 131 self.auditor = scmutil.pathauditor(self.root, self._checknested)
132 132 self.vfs = scmutil.vfs(self.path)
133 133 self.opener = self.vfs
134 134 self.baseui = baseui
135 135 self.ui = baseui.copy()
136 136 # A list of callbacks to shape the phase if no data were found.
137 137 # Callbacks are in the form: func(repo, roots) --> processed root.
138 138 # This list is to be filled by extensions during repo setup
139 139 self._phasedefaults = []
140 140 try:
141 141 self.ui.readconfig(self.join("hgrc"), self.root)
142 142 extensions.loadall(self.ui)
143 143 except IOError:
144 144 pass
145 145
146 146 if not self.vfs.isdir():
147 147 if create:
148 148 if not self.wvfs.exists():
149 149 self.wvfs.makedirs()
150 150 self.vfs.makedir(notindexed=True)
151 151 requirements = self._baserequirements(create)
152 152 if self.ui.configbool('format', 'usestore', True):
153 153 self.vfs.mkdir("store")
154 154 requirements.append("store")
155 155 if self.ui.configbool('format', 'usefncache', True):
156 156 requirements.append("fncache")
157 157 if self.ui.configbool('format', 'dotencode', True):
158 158 requirements.append('dotencode')
159 159 # create an invalid changelog
160 160 self.vfs.append(
161 161 "00changelog.i",
162 162 '\0\0\0\2' # represents revlogv2
163 163 ' dummy changelog to prevent using the old repo layout'
164 164 )
165 165 if self.ui.configbool('format', 'generaldelta', False):
166 166 requirements.append("generaldelta")
167 167 requirements = set(requirements)
168 168 else:
169 169 raise error.RepoError(_("repository %s not found") % path)
170 170 elif create:
171 171 raise error.RepoError(_("repository %s already exists") % path)
172 172 else:
173 173 try:
174 174 requirements = scmutil.readrequires(self.vfs, self.supported)
175 175 except IOError, inst:
176 176 if inst.errno != errno.ENOENT:
177 177 raise
178 178 requirements = set()
179 179
180 180 self.sharedpath = self.path
181 181 try:
182 182 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
183 183 if not os.path.exists(s):
184 184 raise error.RepoError(
185 185 _('.hg/sharedpath points to nonexistent directory %s') % s)
186 186 self.sharedpath = s
187 187 except IOError, inst:
188 188 if inst.errno != errno.ENOENT:
189 189 raise
190 190
191 191 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
192 192 self.spath = self.store.path
193 193 self.svfs = self.store.vfs
194 194 self.sopener = self.svfs
195 195 self.sjoin = self.store.join
196 196 self.vfs.createmode = self.store.createmode
197 197 self._applyrequirements(requirements)
198 198 if create:
199 199 self._writerequirements()
200 200
201 201
202 202 self._branchcache = None
203 203 self._branchcachetip = None
204 204 self.filterpats = {}
205 205 self._datafilters = {}
206 206 self._transref = self._lockref = self._wlockref = None
207 207
208 208 # A cache for various files under .hg/ that tracks file changes,
209 209 # (used by the filecache decorator)
210 210 #
211 211 # Maps a property name to its util.filecacheentry
212 212 self._filecache = {}
213 213
214 214 def close(self):
215 215 pass
216 216
217 217 def _restrictcapabilities(self, caps):
218 218 return caps
219 219
220 220 def _applyrequirements(self, requirements):
221 221 self.requirements = requirements
222 222 self.sopener.options = dict((r, 1) for r in requirements
223 223 if r in self.openerreqs)
224 224
225 225 def _writerequirements(self):
226 226 reqfile = self.opener("requires", "w")
227 227 for r in self.requirements:
228 228 reqfile.write("%s\n" % r)
229 229 reqfile.close()
230 230
231 231 def _checknested(self, path):
232 232 """Determine if path is a legal nested repository."""
233 233 if not path.startswith(self.root):
234 234 return False
235 235 subpath = path[len(self.root) + 1:]
236 236 normsubpath = util.pconvert(subpath)
237 237
238 238 # XXX: Checking against the current working copy is wrong in
239 239 # the sense that it can reject things like
240 240 #
241 241 # $ hg cat -r 10 sub/x.txt
242 242 #
243 243 # if sub/ is no longer a subrepository in the working copy
244 244 # parent revision.
245 245 #
246 246 # However, it can of course also allow things that would have
247 247 # been rejected before, such as the above cat command if sub/
248 248 # is a subrepository now, but was a normal directory before.
249 249 # The old path auditor would have rejected by mistake since it
250 250 # panics when it sees sub/.hg/.
251 251 #
252 252 # All in all, checking against the working copy seems sensible
253 253 # since we want to prevent access to nested repositories on
254 254 # the filesystem *now*.
255 255 ctx = self[None]
256 256 parts = util.splitpath(subpath)
257 257 while parts:
258 258 prefix = '/'.join(parts)
259 259 if prefix in ctx.substate:
260 260 if prefix == normsubpath:
261 261 return True
262 262 else:
263 263 sub = ctx.sub(prefix)
264 264 return sub.checknested(subpath[len(prefix) + 1:])
265 265 else:
266 266 parts.pop()
267 267 return False
268 268
269 269 def peer(self):
270 270 return localpeer(self) # not cached to avoid reference cycle
271 271
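Given `peer()` above and the `localpeer` class near the top of the file, calling code obtains a peer object for a local repository like this (a sketch; the repository path is hypothetical):

from mercurial import ui as uimod, hg

repo = hg.repository(uimod.ui(), '/path/to/repo')    # hypothetical on-disk repo
peer = repo.peer()                                    # a fresh localpeer, never cached
print(peer.url())                                     # 'file:/path/to/repo'
print(peer.known([repo.lookup('tip')]))               # e.g. [True] if tip is not secret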
272 272 def unfiltered(self):
273 273 """Return unfiltered version of the repository
274 274
275 275 Intended to be overwritten by filtered repo."""
276 276 return self
277 277
278 278 @filecache('bookmarks')
279 279 def _bookmarks(self):
280 280 return bookmarks.bmstore(self)
281 281
282 282 @filecache('bookmarks.current')
283 283 def _bookmarkcurrent(self):
284 284 return bookmarks.readcurrent(self)
285 285
286 286 def bookmarkheads(self, bookmark):
287 287 name = bookmark.split('@', 1)[0]
288 288 heads = []
289 289 for mark, n in self._bookmarks.iteritems():
290 290 if mark.split('@', 1)[0] == name:
291 291 heads.append(n)
292 292 return heads
293 293
294 294 @storecache('phaseroots')
295 295 def _phasecache(self):
296 296 return phases.phasecache(self, self._phasedefaults)
297 297
298 298 @storecache('obsstore')
299 299 def obsstore(self):
300 300 store = obsolete.obsstore(self.sopener)
301 301 if store and not obsolete._enabled:
302 302 # message is rare enough to not be translated
303 303 msg = 'obsolete feature not enabled but %i markers found!\n'
304 304 self.ui.warn(msg % len(list(store)))
305 305 return store
306 306
307 307 @propertycache
308 308 def hiddenrevs(self):
309 309 """hiddenrevs: revs that should be hidden by command and tools
310 310
311 311 This set is carried on the repo to ease initialization and lazy
312 312 loading; it'll probably move back to changelog for efficiency and
313 313 consistency reasons.
314 314
315 315 Note that the hiddenrevs will need invalidation when
316 316 - a new changeset is added (possibly unstable above extinct)
317 317 - a new obsolete marker is added (possibly a new extinct changeset)
318 318
319 319 hidden changesets cannot have non-hidden descendants
320 320 """
321 321 hidden = set()
322 322 if self.obsstore:
323 323 ### hide extinct changesets that are not accessible by any means
324 324 hiddenquery = 'extinct() - ::(. + bookmark())'
325 325 hidden.update(self.revs(hiddenquery))
326 326 return hidden
327 327
328 328 @storecache('00changelog.i')
329 329 def changelog(self):
330 330 c = changelog.changelog(self.sopener)
331 331 if 'HG_PENDING' in os.environ:
332 332 p = os.environ['HG_PENDING']
333 333 if p.startswith(self.root):
334 334 c.readpending('00changelog.i.a')
335 335 return c
336 336
337 337 @storecache('00manifest.i')
338 338 def manifest(self):
339 339 return manifest.manifest(self.sopener)
340 340
341 341 @filecache('dirstate')
342 342 def dirstate(self):
343 343 warned = [0]
344 344 def validate(node):
345 345 try:
346 346 self.changelog.rev(node)
347 347 return node
348 348 except error.LookupError:
349 349 if not warned[0]:
350 350 warned[0] = True
351 351 self.ui.warn(_("warning: ignoring unknown"
352 352 " working parent %s!\n") % short(node))
353 353 return nullid
354 354
355 355 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
356 356
357 357 def __getitem__(self, changeid):
358 358 if changeid is None:
359 359 return context.workingctx(self)
360 360 return context.changectx(self, changeid)
361 361
362 362 def __contains__(self, changeid):
363 363 try:
364 364 return bool(self.lookup(changeid))
365 365 except error.RepoLookupError:
366 366 return False
367 367
368 368 def __nonzero__(self):
369 369 return True
370 370
371 371 def __len__(self):
372 372 return len(self.changelog)
373 373
374 374 def __iter__(self):
375 375 return iter(self.changelog)
376 376
377 377 def revs(self, expr, *args):
378 378 '''Return a list of revisions matching the given revset'''
379 379 expr = revset.formatspec(expr, *args)
380 380 m = revset.match(None, expr)
381 381 return [r for r in m(self, list(self))]
382 382
383 383 def set(self, expr, *args):
384 384 '''
385 385 Yield a context for each matching revision, after doing arg
386 386 replacement via revset.formatspec
387 387 '''
388 388 for r in self.revs(expr, *args):
389 389 yield self[r]
390 390
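`revs()` and `set()` accept a revset expression with printf-style placeholders that are expanded by `revset.formatspec`; a small sketch of typical use against an already-opened `repo` (the revision argument and branch name are illustrative):

# integer revisions of non-public ancestors of the tip
for r in repo.revs('ancestors(%d) and not public()', len(repo) - 1):
    print(r)

# changectx objects for the heads of a named branch
for ctx in repo.set('heads(branch(%s))', 'default'):
    print('%s %s' % (ctx.hex(), ctx.branch()))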
391 391 def url(self):
392 392 return 'file:' + self.root
393 393
394 394 def hook(self, name, throw=False, **args):
395 395 return hook.hook(self.ui, self, name, throw, **args)
396 396
397 397 @unfilteredmeth
398 398 def _tag(self, names, node, message, local, user, date, extra={}):
399 399 if isinstance(names, str):
400 400 names = (names,)
401 401
402 402 branches = self.branchmap()
403 403 for name in names:
404 404 self.hook('pretag', throw=True, node=hex(node), tag=name,
405 405 local=local)
406 406 if name in branches:
407 407 self.ui.warn(_("warning: tag %s conflicts with existing"
408 408 " branch name\n") % name)
409 409
410 410 def writetags(fp, names, munge, prevtags):
411 411 fp.seek(0, 2)
412 412 if prevtags and prevtags[-1] != '\n':
413 413 fp.write('\n')
414 414 for name in names:
415 415 m = munge and munge(name) or name
416 416 if (self._tagscache.tagtypes and
417 417 name in self._tagscache.tagtypes):
418 418 old = self.tags().get(name, nullid)
419 419 fp.write('%s %s\n' % (hex(old), m))
420 420 fp.write('%s %s\n' % (hex(node), m))
421 421 fp.close()
422 422
423 423 prevtags = ''
424 424 if local:
425 425 try:
426 426 fp = self.opener('localtags', 'r+')
427 427 except IOError:
428 428 fp = self.opener('localtags', 'a')
429 429 else:
430 430 prevtags = fp.read()
431 431
432 432 # local tags are stored in the current charset
433 433 writetags(fp, names, None, prevtags)
434 434 for name in names:
435 435 self.hook('tag', node=hex(node), tag=name, local=local)
436 436 return
437 437
438 438 try:
439 439 fp = self.wfile('.hgtags', 'rb+')
440 440 except IOError, e:
441 441 if e.errno != errno.ENOENT:
442 442 raise
443 443 fp = self.wfile('.hgtags', 'ab')
444 444 else:
445 445 prevtags = fp.read()
446 446
447 447 # committed tags are stored in UTF-8
448 448 writetags(fp, names, encoding.fromlocal, prevtags)
449 449
450 450 fp.close()
451 451
452 452 self.invalidatecaches()
453 453
454 454 if '.hgtags' not in self.dirstate:
455 455 self[None].add(['.hgtags'])
456 456
457 457 m = matchmod.exact(self.root, '', ['.hgtags'])
458 458 tagnode = self.commit(message, user, date, extra=extra, match=m)
459 459
460 460 for name in names:
461 461 self.hook('tag', node=hex(node), tag=name, local=local)
462 462
463 463 return tagnode
464 464
465 465 def tag(self, names, node, message, local, user, date):
466 466 '''tag a revision with one or more symbolic names.
467 467
468 468 names is a list of strings or, when adding a single tag, names may be a
469 469 string.
470 470
471 471 if local is True, the tags are stored in a per-repository file.
472 472 otherwise, they are stored in the .hgtags file, and a new
473 473 changeset is committed with the change.
474 474
475 475 keyword arguments:
476 476
477 477 local: whether to store tags in non-version-controlled file
478 478 (default False)
479 479
480 480 message: commit message to use if committing
481 481
482 482 user: name of user to use if committing
483 483
484 484 date: date tuple to use if committing'''
485 485
486 486 if not local:
487 487 for x in self.status()[:5]:
488 488 if '.hgtags' in x:
489 489 raise util.Abort(_('working copy of .hgtags is changed '
490 490 '(please commit .hgtags manually)'))
491 491
492 492 self.tags() # instantiate the cache
493 493 self._tag(names, node, message, local, user, date)
494 494
495 495 @propertycache
496 496 def _tagscache(self):
497 497 '''Returns a tagscache object that contains various tags related
498 498 caches.'''
499 499
500 500 # This simplifies its cache management by having one decorated
501 501 # function (this one) and the rest simply fetch things from it.
502 502 class tagscache(object):
503 503 def __init__(self):
504 504 # These two define the set of tags for this repository. tags
505 505 # maps tag name to node; tagtypes maps tag name to 'global' or
506 506 # 'local'. (Global tags are defined by .hgtags across all
507 507 # heads, and local tags are defined in .hg/localtags.)
508 508 # They constitute the in-memory cache of tags.
509 509 self.tags = self.tagtypes = None
510 510
511 511 self.nodetagscache = self.tagslist = None
512 512
513 513 cache = tagscache()
514 514 cache.tags, cache.tagtypes = self._findtags()
515 515
516 516 return cache
517 517
518 518 def tags(self):
519 519 '''return a mapping of tag to node'''
520 520 t = {}
521 521 if self.changelog.filteredrevs:
522 522 tags, tt = self._findtags()
523 523 else:
524 524 tags = self._tagscache.tags
525 525 for k, v in tags.iteritems():
526 526 try:
527 527 # ignore tags to unknown nodes
528 528 self.changelog.rev(v)
529 529 t[k] = v
530 530 except (error.LookupError, ValueError):
531 531 pass
532 532 return t
533 533
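Typical read-side use of the tag machinery defined above (a sketch; the tag name is hypothetical):

t = repo.tags()                      # {tag name: node}, tags to unknown nodes filtered out
node = t['tip']
print(repo.nodetags(node))           # every tag attached to that node
print(repo.tagtype('v1.0'))          # 'global', 'local', or None (hypothetical tag name)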
534 534 def _findtags(self):
535 535 '''Do the hard work of finding tags. Return a pair of dicts
536 536 (tags, tagtypes) where tags maps tag name to node, and tagtypes
537 537 maps tag name to a string like \'global\' or \'local\'.
538 538 Subclasses or extensions are free to add their own tags, but
539 539 should be aware that the returned dicts will be retained for the
540 540 duration of the localrepo object.'''
541 541
542 542 # XXX what tagtype should subclasses/extensions use? Currently
543 543 # mq and bookmarks add tags, but do not set the tagtype at all.
544 544 # Should each extension invent its own tag type? Should there
545 545 # be one tagtype for all such "virtual" tags? Or is the status
546 546 # quo fine?
547 547
548 548 alltags = {} # map tag name to (node, hist)
549 549 tagtypes = {}
550 550
551 551 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
552 552 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
553 553
554 554 # Build the return dicts. Have to re-encode tag names because
555 555 # the tags module always uses UTF-8 (in order not to lose info
556 556 # writing to the cache), but the rest of Mercurial wants them in
557 557 # local encoding.
558 558 tags = {}
559 559 for (name, (node, hist)) in alltags.iteritems():
560 560 if node != nullid:
561 561 tags[encoding.tolocal(name)] = node
562 562 tags['tip'] = self.changelog.tip()
563 563 tagtypes = dict([(encoding.tolocal(name), value)
564 564 for (name, value) in tagtypes.iteritems()])
565 565 return (tags, tagtypes)
566 566
567 567 def tagtype(self, tagname):
568 568 '''
569 569 return the type of the given tag. result can be:
570 570
571 571 'local' : a local tag
572 572 'global' : a global tag
573 573 None : tag does not exist
574 574 '''
575 575
576 576 return self._tagscache.tagtypes.get(tagname)
577 577
578 578 def tagslist(self):
579 579 '''return a list of tags ordered by revision'''
580 580 if not self._tagscache.tagslist:
581 581 l = []
582 582 for t, n in self.tags().iteritems():
583 583 r = self.changelog.rev(n)
584 584 l.append((r, t, n))
585 585 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
586 586
587 587 return self._tagscache.tagslist
588 588
589 589 def nodetags(self, node):
590 590 '''return the tags associated with a node'''
591 591 if not self._tagscache.nodetagscache:
592 592 nodetagscache = {}
593 593 for t, n in self._tagscache.tags.iteritems():
594 594 nodetagscache.setdefault(n, []).append(t)
595 595 for tags in nodetagscache.itervalues():
596 596 tags.sort()
597 597 self._tagscache.nodetagscache = nodetagscache
598 598 return self._tagscache.nodetagscache.get(node, [])
599 599
600 600 def nodebookmarks(self, node):
601 601 marks = []
602 602 for bookmark, n in self._bookmarks.iteritems():
603 603 if n == node:
604 604 marks.append(bookmark)
605 605 return sorted(marks)
606 606
607 607 def _branchtags(self, partial, lrev):
608 608 # TODO: rename this function?
609 609 tiprev = len(self) - 1
610 610 if lrev != tiprev:
611 611 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
612 612 self._updatebranchcache(partial, ctxgen)
613 613 self._writebranchcache(partial, self.changelog.tip(), tiprev)
614 614
615 615 return partial
616 616
617 617 @unfilteredmeth # Until we get a smarter cache management
618 618 def updatebranchcache(self):
619 619 tip = self.changelog.tip()
620 620 if self._branchcache is not None and self._branchcachetip == tip:
621 621 return
622 622
623 623 oldtip = self._branchcachetip
624 624 self._branchcachetip = tip
625 625 if oldtip is None or oldtip not in self.changelog.nodemap:
626 626 partial, last, lrev = self._readbranchcache()
627 627 else:
628 628 lrev = self.changelog.rev(oldtip)
629 629 partial = self._branchcache
630 630
631 631 self._branchtags(partial, lrev)
632 632 # this private cache holds all heads (not just the branch tips)
633 633 self._branchcache = partial
634 634
635 635 def branchmap(self):
636 636 '''returns a dictionary {branch: [branchheads]}'''
637 637 if self.changelog.filteredrevs:
638 638 # some changesets are excluded; we can't use the cache
639 639 branchmap = {}
640 640 self._updatebranchcache(branchmap, (self[r] for r in self))
641 641 return branchmap
642 642 else:
643 643 self.updatebranchcache()
644 644 return self._branchcache
645 645
646 646
647 647 def _branchtip(self, heads):
648 648 '''return the tipmost branch head in heads'''
649 649 tip = heads[-1]
650 650 for h in reversed(heads):
651 651 if not self[h].closesbranch():
652 652 tip = h
653 653 break
654 654 return tip
655 655
656 656 def branchtip(self, branch):
657 657 '''return the tip node for a given branch'''
658 658 if branch not in self.branchmap():
659 659 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
660 660 return self._branchtip(self.branchmap()[branch])
661 661
662 662 def branchtags(self):
663 663 '''return a dict where branch names map to the tipmost head of
664 664 the branch, open heads come before closed'''
665 665 bt = {}
666 666 for bn, heads in self.branchmap().iteritems():
667 667 bt[bn] = self._branchtip(heads)
668 668 return bt
669 669
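Reading branch information through `branchmap()` and `branchtip()` defined above (a sketch against an open `repo`):

bmap = repo.branchmap()                      # {branch name: [head nodes]}
for branch, heads in bmap.iteritems():
    tip = repo.branchtip(branch)             # tipmost head, preferring open heads
    print('%s: %d head(s), tip %s' % (branch, len(heads), repo[tip].hex()))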
670 670 @unfilteredmeth # Until we get a smarter cache management
671 671 def _readbranchcache(self):
672 672 partial = {}
673 673 try:
674 674 f = self.opener("cache/branchheads")
675 675 lines = f.read().split('\n')
676 676 f.close()
677 677 except (IOError, OSError):
678 678 return {}, nullid, nullrev
679 679
680 680 try:
681 681 last, lrev = lines.pop(0).split(" ", 1)
682 682 last, lrev = bin(last), int(lrev)
683 683 if lrev >= len(self) or self[lrev].node() != last:
684 684 # invalidate the cache
685 685 raise ValueError('invalidating branch cache (tip differs)')
686 686 for l in lines:
687 687 if not l:
688 688 continue
689 689 node, label = l.split(" ", 1)
690 690 label = encoding.tolocal(label.strip())
691 691 if not node in self:
692 692 raise ValueError('invalidating branch cache because node '+
693 693 '%s does not exist' % node)
694 694 partial.setdefault(label, []).append(bin(node))
695 695 except KeyboardInterrupt:
696 696 raise
697 697 except Exception, inst:
698 698 if self.ui.debugflag:
699 699 self.ui.warn(str(inst), '\n')
700 700 partial, last, lrev = {}, nullid, nullrev
701 701 return partial, last, lrev
702 702
703 703 @unfilteredmeth # Until we get a smarter cache management
704 704 def _writebranchcache(self, branches, tip, tiprev):
705 705 try:
706 706 f = self.opener("cache/branchheads", "w", atomictemp=True)
707 707 f.write("%s %s\n" % (hex(tip), tiprev))
708 708 for label, nodes in branches.iteritems():
709 709 for node in nodes:
710 710 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
711 711 f.close()
712 712 except (IOError, OSError):
713 713 pass
714 714
715 715 @unfilteredmeth # Until we get a smarter cache management
716 716 def _updatebranchcache(self, partial, ctxgen):
717 717 """Given a branchhead cache, partial, that may have extra nodes or be
718 718 missing heads, and a generator of nodes that are at least a superset of
719 719 heads missing, this function updates partial to be correct.
720 720 """
721 721 # collect new branch entries
722 722 newbranches = {}
723 723 for c in ctxgen:
724 724 newbranches.setdefault(c.branch(), []).append(c.node())
725 725 # if older branchheads are reachable from new ones, they aren't
726 726 # really branchheads. Note checking parents is insufficient:
727 727 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
728 728 for branch, newnodes in newbranches.iteritems():
729 729 bheads = partial.setdefault(branch, [])
730 730 # Remove candidate heads that no longer are in the repo (e.g., as
731 731 # the result of a strip that just happened). Avoid using 'node in
732 732 # self' here because that dives down into branchcache code somewhat
733 733 # recursively.
734 734 bheadrevs = [self.changelog.rev(node) for node in bheads
735 735 if self.changelog.hasnode(node)]
736 736 newheadrevs = [self.changelog.rev(node) for node in newnodes
737 737 if self.changelog.hasnode(node)]
738 738 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
739 739 # Remove duplicates - nodes that are in newheadrevs and are already
740 740 # in bheadrevs. This can happen if you strip a node whose parent
741 741 # was already a head (because they're on different branches).
742 742 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
743 743
744 744 # Starting from tip means fewer passes over reachable. If we know
745 745 # the new candidates are not ancestors of existing heads, we don't
746 746 # have to examine ancestors of existing heads
747 747 if ctxisnew:
748 748 iterrevs = sorted(newheadrevs)
749 749 else:
750 750 iterrevs = list(bheadrevs)
751 751
752 752 # This loop prunes out two kinds of heads - heads that are
753 753 # superseded by a head in newheadrevs, and newheadrevs that are not
754 754 # heads because an existing head is their descendant.
755 755 while iterrevs:
756 756 latest = iterrevs.pop()
757 757 if latest not in bheadrevs:
758 758 continue
759 759 ancestors = set(self.changelog.ancestors([latest],
760 760 bheadrevs[0]))
761 761 if ancestors:
762 762 bheadrevs = [b for b in bheadrevs if b not in ancestors]
763 763 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
764 764
765 765 # There may be branches that cease to exist when the last commit in the
766 766 # branch was stripped. This code filters them out. Note that the
767 767 # branch that ceased to exist may not be in newbranches because
768 768 # newbranches is the set of candidate heads, which when you strip the
769 769 # last commit in a branch will be the parent branch.
770 770 for branch in partial.keys():
771 771 nodes = [head for head in partial[branch]
772 772 if self.changelog.hasnode(head)]
773 773 if not nodes:
774 774 del partial[branch]
775 775
776 776 def lookup(self, key):
777 777 return self[key].node()
778 778
779 779 def lookupbranch(self, key, remote=None):
780 780 repo = remote or self
781 781 if key in repo.branchmap():
782 782 return key
783 783
784 784 repo = (remote and remote.local()) and remote or self
785 785 return repo[key].branch()
786 786
787 787 def known(self, nodes):
788 788 nm = self.changelog.nodemap
789 789 pc = self._phasecache
790 790 result = []
791 791 for n in nodes:
792 792 r = nm.get(n)
793 793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 794 result.append(resp)
795 795 return result
796 796
797 797 def local(self):
798 798 return self
799 799
800 800 def cancopy(self):
801 801 return self.local() # so statichttprepo's override of local() works
802 802
803 803 def join(self, f):
804 804 return os.path.join(self.path, f)
805 805
806 806 def wjoin(self, f):
807 807 return os.path.join(self.root, f)
808 808
809 809 def file(self, f):
810 810 if f[0] == '/':
811 811 f = f[1:]
812 812 return filelog.filelog(self.sopener, f)
813 813
814 814 def changectx(self, changeid):
815 815 return self[changeid]
816 816
817 817 def parents(self, changeid=None):
818 818 '''get list of changectxs for parents of changeid'''
819 819 return self[changeid].parents()
820 820
821 821 def setparents(self, p1, p2=nullid):
822 822 copies = self.dirstate.setparents(p1, p2)
823 823 if copies:
824 824 # Adjust copy records, the dirstate cannot do it, it
825 825 # requires access to parents manifests. Preserve them
826 826 # only for entries added to first parent.
827 827 pctx = self[p1]
828 828 for f in copies:
829 829 if f not in pctx and copies[f] in pctx:
830 830 self.dirstate.copy(copies[f], f)
831 831
832 832 def filectx(self, path, changeid=None, fileid=None):
833 833 """changeid can be a changeset revision, node, or tag.
834 834 fileid can be a file revision or node."""
835 835 return context.filectx(self, path, changeid, fileid)
836 836
837 837 def getcwd(self):
838 838 return self.dirstate.getcwd()
839 839
840 840 def pathto(self, f, cwd=None):
841 841 return self.dirstate.pathto(f, cwd)
842 842
843 843 def wfile(self, f, mode='r'):
844 844 return self.wopener(f, mode)
845 845
846 846 def _link(self, f):
847 847 return os.path.islink(self.wjoin(f))
848 848
849 849 def _loadfilter(self, filter):
850 850 if filter not in self.filterpats:
851 851 l = []
852 852 for pat, cmd in self.ui.configitems(filter):
853 853 if cmd == '!':
854 854 continue
855 855 mf = matchmod.match(self.root, '', [pat])
856 856 fn = None
857 857 params = cmd
858 858 for name, filterfn in self._datafilters.iteritems():
859 859 if cmd.startswith(name):
860 860 fn = filterfn
861 861 params = cmd[len(name):].lstrip()
862 862 break
863 863 if not fn:
864 864 fn = lambda s, c, **kwargs: util.filter(s, c)
865 865 # Wrap old filters not supporting keyword arguments
866 866 if not inspect.getargspec(fn)[2]:
867 867 oldfn = fn
868 868 fn = lambda s, c, **kwargs: oldfn(s, c)
869 869 l.append((mf, fn, params))
870 870 self.filterpats[filter] = l
871 871 return self.filterpats[filter]
872 872
873 873 def _filter(self, filterpats, filename, data):
874 874 for mf, fn, cmd in filterpats:
875 875 if mf(filename):
876 876 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
877 877 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
878 878 break
879 879
880 880 return data
881 881
882 882 @propertycache
883 883 def _encodefilterpats(self):
884 884 return self._loadfilter('encode')
885 885
886 886 @propertycache
887 887 def _decodefilterpats(self):
888 888 return self._loadfilter('decode')
889 889
890 890 def adddatafilter(self, name, filter):
891 891 self._datafilters[name] = filter
892 892
893 893 def wread(self, filename):
894 894 if self._link(filename):
895 895 data = os.readlink(self.wjoin(filename))
896 896 else:
897 897 data = self.wopener.read(filename)
898 898 return self._filter(self._encodefilterpats, filename, data)
899 899
900 900 def wwrite(self, filename, data, flags):
901 901 data = self._filter(self._decodefilterpats, filename, data)
902 902 if 'l' in flags:
903 903 self.wopener.symlink(data, filename)
904 904 else:
905 905 self.wopener.write(filename, data)
906 906 if 'x' in flags:
907 907 util.setflags(self.wjoin(filename), False, True)
908 908
909 909 def wwritedata(self, filename, data):
910 910 return self._filter(self._decodefilterpats, filename, data)
911 911
912 912 def transaction(self, desc):
913 913 tr = self._transref and self._transref() or None
914 914 if tr and tr.running():
915 915 return tr.nest()
916 916
917 917 # abort here if the journal already exists
918 918 if os.path.exists(self.sjoin("journal")):
919 919 raise error.RepoError(
920 920 _("abandoned transaction found - run hg recover"))
921 921
922 922 self._writejournal(desc)
923 923 renames = [(x, undoname(x)) for x in self._journalfiles()]
924 924
925 925 tr = transaction.transaction(self.ui.warn, self.sopener,
926 926 self.sjoin("journal"),
927 927 aftertrans(renames),
928 928 self.store.createmode)
929 929 self._transref = weakref.ref(tr)
930 930 return tr
931 931
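Callers of `transaction()` (see `commitctx` further down) take the store lock first and always release the transaction in a `finally` block so that an exception rolls the journal back; a condensed sketch:

lock = repo.lock()
try:
    tr = repo.transaction('example-operation')   # hypothetical description
    try:
        # ... write revlog/store data through tr here ...
        tr.close()                               # makes the changes permanent
    finally:
        tr.release()                             # rolls back unless close() ran
finally:
    lock.release()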
932 932 def _journalfiles(self):
933 933 return (self.sjoin('journal'), self.join('journal.dirstate'),
934 934 self.join('journal.branch'), self.join('journal.desc'),
935 935 self.join('journal.bookmarks'),
936 936 self.sjoin('journal.phaseroots'))
937 937
938 938 def undofiles(self):
939 939 return [undoname(x) for x in self._journalfiles()]
940 940
941 941 def _writejournal(self, desc):
942 942 self.opener.write("journal.dirstate",
943 943 self.opener.tryread("dirstate"))
944 944 self.opener.write("journal.branch",
945 945 encoding.fromlocal(self.dirstate.branch()))
946 946 self.opener.write("journal.desc",
947 947 "%d\n%s\n" % (len(self), desc))
948 948 self.opener.write("journal.bookmarks",
949 949 self.opener.tryread("bookmarks"))
950 950 self.sopener.write("journal.phaseroots",
951 951 self.sopener.tryread("phaseroots"))
952 952
953 953 def recover(self):
954 954 lock = self.lock()
955 955 try:
956 956 if os.path.exists(self.sjoin("journal")):
957 957 self.ui.status(_("rolling back interrupted transaction\n"))
958 958 transaction.rollback(self.sopener, self.sjoin("journal"),
959 959 self.ui.warn)
960 960 self.invalidate()
961 961 return True
962 962 else:
963 963 self.ui.warn(_("no interrupted transaction available\n"))
964 964 return False
965 965 finally:
966 966 lock.release()
967 967
968 968 def rollback(self, dryrun=False, force=False):
969 969 wlock = lock = None
970 970 try:
971 971 wlock = self.wlock()
972 972 lock = self.lock()
973 973 if os.path.exists(self.sjoin("undo")):
974 974 return self._rollback(dryrun, force)
975 975 else:
976 976 self.ui.warn(_("no rollback information available\n"))
977 977 return 1
978 978 finally:
979 979 release(lock, wlock)
980 980
981 981 def _rollback(self, dryrun, force):
982 982 ui = self.ui
983 983 try:
984 984 args = self.opener.read('undo.desc').splitlines()
985 985 (oldlen, desc, detail) = (int(args[0]), args[1], None)
986 986 if len(args) >= 3:
987 987 detail = args[2]
988 988 oldtip = oldlen - 1
989 989
990 990 if detail and ui.verbose:
991 991 msg = (_('repository tip rolled back to revision %s'
992 992 ' (undo %s: %s)\n')
993 993 % (oldtip, desc, detail))
994 994 else:
995 995 msg = (_('repository tip rolled back to revision %s'
996 996 ' (undo %s)\n')
997 997 % (oldtip, desc))
998 998 except IOError:
999 999 msg = _('rolling back unknown transaction\n')
1000 1000 desc = None
1001 1001
1002 1002 if not force and self['.'] != self['tip'] and desc == 'commit':
1003 1003 raise util.Abort(
1004 1004 _('rollback of last commit while not checked out '
1005 1005 'may lose data'), hint=_('use -f to force'))
1006 1006
1007 1007 ui.status(msg)
1008 1008 if dryrun:
1009 1009 return 0
1010 1010
1011 1011 parents = self.dirstate.parents()
1012 1012 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1013 1013 if os.path.exists(self.join('undo.bookmarks')):
1014 1014 util.rename(self.join('undo.bookmarks'),
1015 1015 self.join('bookmarks'))
1016 1016 if os.path.exists(self.sjoin('undo.phaseroots')):
1017 1017 util.rename(self.sjoin('undo.phaseroots'),
1018 1018 self.sjoin('phaseroots'))
1019 1019 self.invalidate()
1020 1020
1021 1021 # Discard all cache entries to force reloading everything.
1022 1022 self._filecache.clear()
1023 1023
1024 1024 parentgone = (parents[0] not in self.changelog.nodemap or
1025 1025 parents[1] not in self.changelog.nodemap)
1026 1026 if parentgone:
1027 1027 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1028 1028 try:
1029 1029 branch = self.opener.read('undo.branch')
1030 1030 self.dirstate.setbranch(encoding.tolocal(branch))
1031 1031 except IOError:
1032 1032 ui.warn(_('named branch could not be reset: '
1033 1033 'current branch is still \'%s\'\n')
1034 1034 % self.dirstate.branch())
1035 1035
1036 1036 self.dirstate.invalidate()
1037 1037 parents = tuple([p.rev() for p in self.parents()])
1038 1038 if len(parents) > 1:
1039 1039 ui.status(_('working directory now based on '
1040 1040 'revisions %d and %d\n') % parents)
1041 1041 else:
1042 1042 ui.status(_('working directory now based on '
1043 1043 'revision %d\n') % parents)
1044 1044 # TODO: if we know which new heads may result from this rollback, pass
1045 1045 # them to destroy(), which will prevent the branchhead cache from being
1046 1046 # invalidated.
1047 1047 self.destroyed()
1048 1048 return 0
1049 1049
1050 1050 def invalidatecaches(self):
1051 1051 def delcache(name):
1052 1052 try:
1053 1053 delattr(self, name)
1054 1054 except AttributeError:
1055 1055 pass
1056 1056
1057 1057 delcache('_tagscache')
1058 1058
1059 1059 self.unfiltered()._branchcache = None # in UTF-8
1060 1060 self.unfiltered()._branchcachetip = None
1061 1061 obsolete.clearobscaches(self)
1062 1062
1063 1063 def invalidatedirstate(self):
1064 1064 '''Invalidates the dirstate, causing the next call to dirstate
1065 1065 to check if it was modified since the last time it was read,
1066 1066 rereading it if it has.
1067 1067
1068 1068 This is different from dirstate.invalidate() in that it doesn't always
1069 1069 reread the dirstate. Use dirstate.invalidate() if you want to
1070 1070 explicitly read the dirstate again (i.e. restoring it to a previous
1071 1071 known good state).'''
1072 1072 if 'dirstate' in self.__dict__:
1073 1073 for k in self.dirstate._filecache:
1074 1074 try:
1075 1075 delattr(self.dirstate, k)
1076 1076 except AttributeError:
1077 1077 pass
1078 delattr(self, 'dirstate')
1078 delattr(self.unfiltered(), 'dirstate')
1079 1079
1080 1080 def invalidate(self):
1081 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1081 1082 for k in self._filecache:
1082 1083 # dirstate is invalidated separately in invalidatedirstate()
1083 1084 if k == 'dirstate':
1084 1085 continue
1085 1086
1086 1087 try:
1087 delattr(self, k)
1088 delattr(unfiltered, k)
1088 1089 except AttributeError:
1089 1090 pass
1090 1091 self.invalidatecaches()
1091 1092
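The key point of this changeset: `invalidate()`, like `invalidatedirstate()` above it, now calls `delattr()` on `self.unfiltered()`, because that is where the `filecache`/`storecache` descriptors defined earlier keep their values. After invalidation the next attribute access goes back through the descriptor, which re-checks the file on disk. A usage-level sketch:

cl = repo.changelog        # storecache property, computed and cached on the unfiltered repo
repo.invalidate()          # drops cached properties via delattr(repo.unfiltered(), name)
cl = repo.changelog        # re-validated against 00changelog.i on the next access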
1092 1093 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1093 1094 try:
1094 1095 l = lock.lock(lockname, 0, releasefn, desc=desc)
1095 1096 except error.LockHeld, inst:
1096 1097 if not wait:
1097 1098 raise
1098 1099 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1099 1100 (desc, inst.locker))
1100 1101 # default to 600 seconds timeout
1101 1102 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1102 1103 releasefn, desc=desc)
1103 1104 if acquirefn:
1104 1105 acquirefn()
1105 1106 return l
1106 1107
1107 1108 def _afterlock(self, callback):
1108 1109 """add a callback to the current repository lock.
1109 1110
1110 1111 The callback will be executed on lock release."""
1111 1112 l = self._lockref and self._lockref()
1112 1113 if l:
1113 1114 l.postrelease.append(callback)
1114 1115 else:
1115 1116 callback()
1116 1117
1117 1118 def lock(self, wait=True):
1118 1119 '''Lock the repository store (.hg/store) and return a weak reference
1119 1120 to the lock. Use this before modifying the store (e.g. committing or
1120 1121 stripping). If you are opening a transaction, get a lock as well.'''
1121 1122 l = self._lockref and self._lockref()
1122 1123 if l is not None and l.held:
1123 1124 l.lock()
1124 1125 return l
1125 1126
1126 1127 def unlock():
1127 1128 self.store.write()
1128 1129 if '_phasecache' in vars(self):
1129 1130 self._phasecache.write()
1130 1131 for k, ce in self._filecache.items():
1131 1132 if k == 'dirstate':
1132 1133 continue
1133 1134 ce.refresh()
1134 1135
1135 1136 l = self._lock(self.sjoin("lock"), wait, unlock,
1136 1137 self.invalidate, _('repository %s') % self.origroot)
1137 1138 self._lockref = weakref.ref(l)
1138 1139 return l
1139 1140
1140 1141 def wlock(self, wait=True):
1141 1142 '''Lock the non-store parts of the repository (everything under
1142 1143 .hg except .hg/store) and return a weak reference to the lock.
1143 1144 Use this before modifying files in .hg.'''
1144 1145 l = self._wlockref and self._wlockref()
1145 1146 if l is not None and l.held:
1146 1147 l.lock()
1147 1148 return l
1148 1149
1149 1150 def unlock():
1150 1151 self.dirstate.write()
1151 1152 ce = self._filecache.get('dirstate')
1152 1153 if ce:
1153 1154 ce.refresh()
1154 1155
1155 1156 l = self._lock(self.join("wlock"), wait, unlock,
1156 1157 self.invalidatedirstate, _('working directory of %s') %
1157 1158 self.origroot)
1158 1159 self._wlockref = weakref.ref(l)
1159 1160 return l
1160 1161
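When both locks are needed, the methods above and below (`rollback`, `commit`) acquire the working-directory lock before the store lock and release them together; a sketch using `release`, imported from `lock` at the top of the module:

from mercurial.lock import release

wlock = lock = None
try:
    wlock = repo.wlock()     # guards .hg/ outside the store (dirstate, bookmarks, ...)
    lock = repo.lock()       # guards .hg/store (changelog, manifest, filelogs)
    # ... modify working copy and history here ...
finally:
    release(lock, wlock)     # safe even if one of them is still None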
1161 1162 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1162 1163 """
1163 1164 commit an individual file as part of a larger transaction
1164 1165 """
1165 1166
1166 1167 fname = fctx.path()
1167 1168 text = fctx.data()
1168 1169 flog = self.file(fname)
1169 1170 fparent1 = manifest1.get(fname, nullid)
1170 1171 fparent2 = fparent2o = manifest2.get(fname, nullid)
1171 1172
1172 1173 meta = {}
1173 1174 copy = fctx.renamed()
1174 1175 if copy and copy[0] != fname:
1175 1176 # Mark the new revision of this file as a copy of another
1176 1177 # file. This copy data will effectively act as a parent
1177 1178 # of this new revision. If this is a merge, the first
1178 1179 # parent will be the nullid (meaning "look up the copy data")
1179 1180 # and the second one will be the other parent. For example:
1180 1181 #
1181 1182 # 0 --- 1 --- 3 rev1 changes file foo
1182 1183 # \ / rev2 renames foo to bar and changes it
1183 1184 # \- 2 -/ rev3 should have bar with all changes and
1184 1185 # should record that bar descends from
1185 1186 # bar in rev2 and foo in rev1
1186 1187 #
1187 1188 # this allows this merge to succeed:
1188 1189 #
1189 1190 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1190 1191 # \ / merging rev3 and rev4 should use bar@rev2
1191 1192 # \- 2 --- 4 as the merge base
1192 1193 #
1193 1194
1194 1195 cfname = copy[0]
1195 1196 crev = manifest1.get(cfname)
1196 1197 newfparent = fparent2
1197 1198
1198 1199 if manifest2: # branch merge
1199 1200 if fparent2 == nullid or crev is None: # copied on remote side
1200 1201 if cfname in manifest2:
1201 1202 crev = manifest2[cfname]
1202 1203 newfparent = fparent1
1203 1204
1204 1205 # find source in nearest ancestor if we've lost track
1205 1206 if not crev:
1206 1207 self.ui.debug(" %s: searching for copy revision for %s\n" %
1207 1208 (fname, cfname))
1208 1209 for ancestor in self[None].ancestors():
1209 1210 if cfname in ancestor:
1210 1211 crev = ancestor[cfname].filenode()
1211 1212 break
1212 1213
1213 1214 if crev:
1214 1215 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1215 1216 meta["copy"] = cfname
1216 1217 meta["copyrev"] = hex(crev)
1217 1218 fparent1, fparent2 = nullid, newfparent
1218 1219 else:
1219 1220 self.ui.warn(_("warning: can't find ancestor for '%s' "
1220 1221 "copied from '%s'!\n") % (fname, cfname))
1221 1222
1222 1223 elif fparent2 != nullid:
1223 1224 # is one parent an ancestor of the other?
1224 1225 fparentancestor = flog.ancestor(fparent1, fparent2)
1225 1226 if fparentancestor == fparent1:
1226 1227 fparent1, fparent2 = fparent2, nullid
1227 1228 elif fparentancestor == fparent2:
1228 1229 fparent2 = nullid
1229 1230
1230 1231 # is the file changed?
1231 1232 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1232 1233 changelist.append(fname)
1233 1234 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1234 1235
1235 1236 # are just the flags changed during merge?
1236 1237 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1237 1238 changelist.append(fname)
1238 1239
1239 1240 return fparent1
1240 1241
1241 1242 def commit(self, text="", user=None, date=None, match=None, force=False,
1242 1243 editor=False, extra={}):
1243 1244 """Add a new revision to current repository.
1244 1245
1245 1246 Revision information is gathered from the working directory,
1246 1247 match can be used to filter the committed files. If editor is
1247 1248 supplied, it is called to get a commit message.
1248 1249 """
1249 1250
1250 1251 def fail(f, msg):
1251 1252 raise util.Abort('%s: %s' % (f, msg))
1252 1253
1253 1254 if not match:
1254 1255 match = matchmod.always(self.root, '')
1255 1256
1256 1257 if not force:
1257 1258 vdirs = []
1258 1259 match.dir = vdirs.append
1259 1260 match.bad = fail
1260 1261
1261 1262 wlock = self.wlock()
1262 1263 try:
1263 1264 wctx = self[None]
1264 1265 merge = len(wctx.parents()) > 1
1265 1266
1266 1267 if (not force and merge and match and
1267 1268 (match.files() or match.anypats())):
1268 1269 raise util.Abort(_('cannot partially commit a merge '
1269 1270 '(do not specify files or patterns)'))
1270 1271
1271 1272 changes = self.status(match=match, clean=force)
1272 1273 if force:
1273 1274 changes[0].extend(changes[6]) # mq may commit unchanged files
1274 1275
1275 1276 # check subrepos
1276 1277 subs = []
1277 1278 commitsubs = set()
1278 1279 newstate = wctx.substate.copy()
1279 1280 # only manage subrepos and .hgsubstate if .hgsub is present
1280 1281 if '.hgsub' in wctx:
1281 1282 # we'll decide whether to track this ourselves, thanks
1282 1283 if '.hgsubstate' in changes[0]:
1283 1284 changes[0].remove('.hgsubstate')
1284 1285 if '.hgsubstate' in changes[2]:
1285 1286 changes[2].remove('.hgsubstate')
1286 1287
1287 1288 # compare current state to last committed state
1288 1289 # build new substate based on last committed state
1289 1290 oldstate = wctx.p1().substate
1290 1291 for s in sorted(newstate.keys()):
1291 1292 if not match(s):
1292 1293 # ignore working copy, use old state if present
1293 1294 if s in oldstate:
1294 1295 newstate[s] = oldstate[s]
1295 1296 continue
1296 1297 if not force:
1297 1298 raise util.Abort(
1298 1299 _("commit with new subrepo %s excluded") % s)
1299 1300 if wctx.sub(s).dirty(True):
1300 1301 if not self.ui.configbool('ui', 'commitsubrepos'):
1301 1302 raise util.Abort(
1302 1303 _("uncommitted changes in subrepo %s") % s,
1303 1304 hint=_("use --subrepos for recursive commit"))
1304 1305 subs.append(s)
1305 1306 commitsubs.add(s)
1306 1307 else:
1307 1308 bs = wctx.sub(s).basestate()
1308 1309 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1309 1310 if oldstate.get(s, (None, None, None))[1] != bs:
1310 1311 subs.append(s)
1311 1312
1312 1313 # check for removed subrepos
1313 1314 for p in wctx.parents():
1314 1315 r = [s for s in p.substate if s not in newstate]
1315 1316 subs += [s for s in r if match(s)]
1316 1317 if subs:
1317 1318 if (not match('.hgsub') and
1318 1319 '.hgsub' in (wctx.modified() + wctx.added())):
1319 1320 raise util.Abort(
1320 1321 _("can't commit subrepos without .hgsub"))
1321 1322 changes[0].insert(0, '.hgsubstate')
1322 1323
1323 1324 elif '.hgsub' in changes[2]:
1324 1325 # clean up .hgsubstate when .hgsub is removed
1325 1326 if ('.hgsubstate' in wctx and
1326 1327 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1327 1328 changes[2].insert(0, '.hgsubstate')
1328 1329
1329 1330 # make sure all explicit patterns are matched
1330 1331 if not force and match.files():
1331 1332 matched = set(changes[0] + changes[1] + changes[2])
1332 1333
1333 1334 for f in match.files():
1334 1335 f = self.dirstate.normalize(f)
1335 1336 if f == '.' or f in matched or f in wctx.substate:
1336 1337 continue
1337 1338 if f in changes[3]: # missing
1338 1339 fail(f, _('file not found!'))
1339 1340 if f in vdirs: # visited directory
1340 1341 d = f + '/'
1341 1342 for mf in matched:
1342 1343 if mf.startswith(d):
1343 1344 break
1344 1345 else:
1345 1346 fail(f, _("no match under directory!"))
1346 1347 elif f not in self.dirstate:
1347 1348 fail(f, _("file not tracked!"))
1348 1349
1349 1350 if (not force and not extra.get("close") and not merge
1350 1351 and not (changes[0] or changes[1] or changes[2])
1351 1352 and wctx.branch() == wctx.p1().branch()):
1352 1353 return None
1353 1354
1354 1355 if merge and changes[3]:
1355 1356 raise util.Abort(_("cannot commit merge with missing files"))
1356 1357
1357 1358 ms = mergemod.mergestate(self)
1358 1359 for f in changes[0]:
1359 1360 if f in ms and ms[f] == 'u':
1360 1361 raise util.Abort(_("unresolved merge conflicts "
1361 1362 "(see hg help resolve)"))
1362 1363
1363 1364 cctx = context.workingctx(self, text, user, date, extra, changes)
1364 1365 if editor:
1365 1366 cctx._text = editor(self, cctx, subs)
1366 1367 edited = (text != cctx._text)
1367 1368
1368 1369 # commit subs and write new state
1369 1370 if subs:
1370 1371 for s in sorted(commitsubs):
1371 1372 sub = wctx.sub(s)
1372 1373 self.ui.status(_('committing subrepository %s\n') %
1373 1374 subrepo.subrelpath(sub))
1374 1375 sr = sub.commit(cctx._text, user, date)
1375 1376 newstate[s] = (newstate[s][0], sr)
1376 1377 subrepo.writestate(self, newstate)
1377 1378
1378 1379 # Save commit message in case this transaction gets rolled back
1379 1380 # (e.g. by a pretxncommit hook). Leave the content alone on
1380 1381 # the assumption that the user will use the same editor again.
1381 1382 msgfn = self.savecommitmessage(cctx._text)
1382 1383
1383 1384 p1, p2 = self.dirstate.parents()
1384 1385 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1385 1386 try:
1386 1387 self.hook("precommit", throw=True, parent1=hookp1,
1387 1388 parent2=hookp2)
1388 1389 ret = self.commitctx(cctx, True)
1389 1390 except: # re-raises
1390 1391 if edited:
1391 1392 self.ui.write(
1392 1393 _('note: commit message saved in %s\n') % msgfn)
1393 1394 raise
1394 1395
1395 1396 # update bookmarks, dirstate and mergestate
1396 1397 bookmarks.update(self, [p1, p2], ret)
1397 1398 for f in changes[0] + changes[1]:
1398 1399 self.dirstate.normal(f)
1399 1400 for f in changes[2]:
1400 1401 self.dirstate.drop(f)
1401 1402 self.dirstate.setparents(ret)
1402 1403 ms.reset()
1403 1404 finally:
1404 1405 wlock.release()
1405 1406
1406 1407 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1407 1408 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1408 1409 self._afterlock(commithook)
1409 1410 return ret
1410 1411
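Driving `commit()` from calling code typically looks like this (a sketch; the file name, user, and message are hypothetical):

from mercurial import match as matchmod

m = matchmod.exact(repo.root, '', ['somefile.txt'])         # hypothetical file
node = repo.commit('fix somefile', user='alice', match=m)   # None if nothing changed
if node is not None:
    print('committed %s' % repo[node].hex())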
1411 1412 def commitctx(self, ctx, error=False):
1412 1413 """Add a new revision to current repository.
1413 1414 Revision information is passed via the context argument.
1414 1415 """
1415 1416
1416 1417 tr = lock = None
1417 1418 removed = list(ctx.removed())
1418 1419 p1, p2 = ctx.p1(), ctx.p2()
1419 1420 user = ctx.user()
1420 1421
1421 1422 lock = self.lock()
1422 1423 try:
1423 1424 tr = self.transaction("commit")
1424 1425 trp = weakref.proxy(tr)
1425 1426
1426 1427 if ctx.files():
1427 1428 m1 = p1.manifest().copy()
1428 1429 m2 = p2.manifest()
1429 1430
1430 1431 # check in files
1431 1432 new = {}
1432 1433 changed = []
1433 1434 linkrev = len(self)
1434 1435 for f in sorted(ctx.modified() + ctx.added()):
1435 1436 self.ui.note(f + "\n")
1436 1437 try:
1437 1438 fctx = ctx[f]
1438 1439 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1439 1440 changed)
1440 1441 m1.set(f, fctx.flags())
1441 1442 except OSError, inst:
1442 1443 self.ui.warn(_("trouble committing %s!\n") % f)
1443 1444 raise
1444 1445 except IOError, inst:
1445 1446 errcode = getattr(inst, 'errno', errno.ENOENT)
1446 1447 if error or errcode and errcode != errno.ENOENT:
1447 1448 self.ui.warn(_("trouble committing %s!\n") % f)
1448 1449 raise
1449 1450 else:
1450 1451 removed.append(f)
1451 1452
1452 1453 # update manifest
1453 1454 m1.update(new)
1454 1455 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1455 1456 drop = [f for f in removed if f in m1]
1456 1457 for f in drop:
1457 1458 del m1[f]
1458 1459 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1459 1460 p2.manifestnode(), (new, drop))
1460 1461 files = changed + removed
1461 1462 else:
1462 1463 mn = p1.manifestnode()
1463 1464 files = []
1464 1465
1465 1466 # update changelog
1466 1467 self.changelog.delayupdate()
1467 1468 n = self.changelog.add(mn, files, ctx.description(),
1468 1469 trp, p1.node(), p2.node(),
1469 1470 user, ctx.date(), ctx.extra().copy())
1470 1471 p = lambda: self.changelog.writepending() and self.root or ""
1471 1472 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1472 1473 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1473 1474 parent2=xp2, pending=p)
1474 1475 self.changelog.finalize(trp)
1476 1477 # set the new commit in its proper phase
1476 1477 targetphase = phases.newcommitphase(self.ui)
1477 1478 if targetphase:
1478 1479 # retract boundary does not alter the parent changeset.
1479 1480 # if a parent has a higher phase, the resulting phase will
1480 1481 # be compliant anyway
1481 1482 #
1482 1483 # if minimal phase was 0 we don't need to retract anything
1483 1484 phases.retractboundary(self, targetphase, [n])
1484 1485 tr.close()
1485 1486 self.updatebranchcache()
1486 1487 return n
1487 1488 finally:
1488 1489 if tr:
1489 1490 tr.release()
1490 1491 lock.release()
1491 1492
1493 @unfilteredmeth
1492 1494 def destroyed(self, newheadnodes=None):
1493 1495 '''Inform the repository that nodes have been destroyed.
1494 1496 Intended for use by strip and rollback, so there's a common
1495 1497 place for anything that has to be done after destroying history.
1496 1498
1497 1499 If you know the branchhead cache was up to date before nodes were removed
1498 1500 and you also know the set of candidate new heads that may have resulted
1499 1501 from the destruction, you can set newheadnodes. This will enable the
1500 1502 code to update the branchheads cache, rather than having future code
1501 1503 decide it's invalid and regenerating it from scratch.
1502 1504 '''
1503 1505 # If we have info, newheadnodes, on how to update the branch cache, do
1504 1506 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1505 1507 # will be caught the next time it is read.
1506 1508 if newheadnodes:
1507 1509 tiprev = len(self) - 1
1508 1510 ctxgen = (self[node] for node in newheadnodes
1509 1511 if self.changelog.hasnode(node))
1510 1512 self._updatebranchcache(self._branchcache, ctxgen)
1511 1513 self._writebranchcache(self._branchcache, self.changelog.tip(),
1512 1514 tiprev)
1513 1515
1514 1516 # Ensure the persistent tag cache is updated. Doing it now
1515 1517 # means that the tag cache only has to worry about destroyed
1516 1518 # heads immediately after a strip/rollback. That in turn
1517 1519 # guarantees that "cachetip == currenttip" (comparing both rev
1518 1520 # and node) always means no nodes have been added or destroyed.
1519 1521
1520 1522 # XXX this is suboptimal when qrefresh'ing: we strip the current
1521 1523 # head, refresh the tag cache, then immediately add a new head.
1522 1524 # But I think doing it this way is necessary for the "instant
1523 1525 # tag cache retrieval" case to work.
1524 1526 self.invalidatecaches()
1525 1527
1526 1528 # Discard all cache entries to force reloading everything.
1527 1529 self._filecache.clear()
1528 1530
1529 1531 def walk(self, match, node=None):
1530 1532 '''
1531 1533 walk recursively through the directory tree or a given
1532 1534 changeset, finding all files matched by the match
1533 1535 function
1534 1536 '''
1535 1537 return self[node].walk(match)
1536 1538
1537 1539 def status(self, node1='.', node2=None, match=None,
1538 1540 ignored=False, clean=False, unknown=False,
1539 1541 listsubrepos=False):
1540 1542 """return status of files between two nodes or node and working
1541 1543 directory.
1542 1544
1543 1545 If node1 is None, use the first dirstate parent instead.
1544 1546 If node2 is None, compare node1 with working directory.
1545 1547 """
1546 1548
1547 1549 def mfmatches(ctx):
1548 1550 mf = ctx.manifest().copy()
1549 1551 if match.always():
1550 1552 return mf
1551 1553 for fn in mf.keys():
1552 1554 if not match(fn):
1553 1555 del mf[fn]
1554 1556 return mf
1555 1557
1556 1558 if isinstance(node1, context.changectx):
1557 1559 ctx1 = node1
1558 1560 else:
1559 1561 ctx1 = self[node1]
1560 1562 if isinstance(node2, context.changectx):
1561 1563 ctx2 = node2
1562 1564 else:
1563 1565 ctx2 = self[node2]
1564 1566
1565 1567 working = ctx2.rev() is None
1566 1568 parentworking = working and ctx1 == self['.']
1567 1569 match = match or matchmod.always(self.root, self.getcwd())
1568 1570 listignored, listclean, listunknown = ignored, clean, unknown
1569 1571
1570 1572 # load earliest manifest first for caching reasons
1571 1573 if not working and ctx2.rev() < ctx1.rev():
1572 1574 ctx2.manifest()
1573 1575
1574 1576 if not parentworking:
1575 1577 def bad(f, msg):
1576 1578 # 'f' may be a directory pattern from 'match.files()',
1577 1579 # so 'f not in ctx1' is not enough
1578 1580 if f not in ctx1 and f not in ctx1.dirs():
1579 1581 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1580 1582 match.bad = bad
1581 1583
1582 1584 if working: # we need to scan the working dir
1583 1585 subrepos = []
1584 1586 if '.hgsub' in self.dirstate:
1585 1587 subrepos = ctx2.substate.keys()
1586 1588 s = self.dirstate.status(match, subrepos, listignored,
1587 1589 listclean, listunknown)
1588 1590 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1589 1591
1590 1592 # check for any possibly clean files
1591 1593 if parentworking and cmp:
1592 1594 fixup = []
1593 1595 # do a full compare of any files that might have changed
1594 1596 for f in sorted(cmp):
1595 1597 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1596 1598 or ctx1[f].cmp(ctx2[f])):
1597 1599 modified.append(f)
1598 1600 else:
1599 1601 fixup.append(f)
1600 1602
1601 1603 # update dirstate for files that are actually clean
1602 1604 if fixup:
1603 1605 if listclean:
1604 1606 clean += fixup
1605 1607
1606 1608 try:
1607 1609 # updating the dirstate is optional
1608 1610 # so we don't wait on the lock
1609 1611 wlock = self.wlock(False)
1610 1612 try:
1611 1613 for f in fixup:
1612 1614 self.dirstate.normal(f)
1613 1615 finally:
1614 1616 wlock.release()
1615 1617 except error.LockError:
1616 1618 pass
1617 1619
1618 1620 if not parentworking:
1619 1621 mf1 = mfmatches(ctx1)
1620 1622 if working:
1621 1623 # we are comparing working dir against non-parent
1622 1624 # generate a pseudo-manifest for the working dir
1623 1625 mf2 = mfmatches(self['.'])
1624 1626 for f in cmp + modified + added:
1625 1627 mf2[f] = None
1626 1628 mf2.set(f, ctx2.flags(f))
1627 1629 for f in removed:
1628 1630 if f in mf2:
1629 1631 del mf2[f]
1630 1632 else:
1631 1633 # we are comparing two revisions
1632 1634 deleted, unknown, ignored = [], [], []
1633 1635 mf2 = mfmatches(ctx2)
1634 1636
1635 1637 modified, added, clean = [], [], []
1636 1638 withflags = mf1.withflags() | mf2.withflags()
1637 1639 for fn in mf2:
1638 1640 if fn in mf1:
1639 1641 if (fn not in deleted and
1640 1642 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1641 1643 (mf1[fn] != mf2[fn] and
1642 1644 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1643 1645 modified.append(fn)
1644 1646 elif listclean:
1645 1647 clean.append(fn)
1646 1648 del mf1[fn]
1647 1649 elif fn not in deleted:
1648 1650 added.append(fn)
1649 1651 removed = mf1.keys()
1650 1652
1651 1653 if working and modified and not self.dirstate._checklink:
1652 1654 # Symlink placeholders may get non-symlink-like contents
1653 1655 # via user error or dereferencing by NFS or Samba servers,
1654 1656 # so we filter out any placeholders that don't look like a
1655 1657 # symlink
1656 1658 sane = []
1657 1659 for f in modified:
1658 1660 if ctx2.flags(f) == 'l':
1659 1661 d = ctx2[f].data()
1660 1662 if len(d) >= 1024 or '\n' in d or util.binary(d):
1661 1663 self.ui.debug('ignoring suspect symlink placeholder'
1662 1664 ' "%s"\n' % f)
1663 1665 continue
1664 1666 sane.append(f)
1665 1667 modified = sane
1666 1668
1667 1669 r = modified, added, removed, deleted, unknown, ignored, clean
1668 1670
1669 1671 if listsubrepos:
1670 1672 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1671 1673 if working:
1672 1674 rev2 = None
1673 1675 else:
1674 1676 rev2 = ctx2.substate[subpath][1]
1675 1677 try:
1676 1678 submatch = matchmod.narrowmatcher(subpath, match)
1677 1679 s = sub.status(rev2, match=submatch, ignored=listignored,
1678 1680 clean=listclean, unknown=listunknown,
1679 1681 listsubrepos=True)
1680 1682 for rfiles, sfiles in zip(r, s):
1681 1683 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1682 1684 except error.LookupError:
1683 1685 self.ui.status(_("skipping missing subrepository: %s\n")
1684 1686 % subpath)
1685 1687
1686 1688 for l in r:
1687 1689 l.sort()
1688 1690 return r
1689 1691
1690 1692 def heads(self, start=None):
1691 1693 heads = self.changelog.heads(start)
1692 1694 # sort the output in rev descending order
1693 1695 return sorted(heads, key=self.changelog.rev, reverse=True)
1694 1696
1695 1697 def branchheads(self, branch=None, start=None, closed=False):
1696 1698 '''return a (possibly filtered) list of heads for the given branch
1697 1699
1698 1700 Heads are returned in topological order, from newest to oldest.
1699 1701 If branch is None, use the dirstate branch.
1700 1702 If start is not None, return only heads reachable from start.
1701 1703 If closed is True, return heads that are marked as closed as well.
1702 1704 '''
1703 1705 if branch is None:
1704 1706 branch = self[None].branch()
1705 1707 branches = self.branchmap()
1706 1708 if branch not in branches:
1707 1709 return []
1708 1710 # the cache returns heads ordered lowest to highest
1709 1711 bheads = list(reversed(branches[branch]))
1710 1712 if start is not None:
1711 1713 # filter out the heads that cannot be reached from startrev
1712 1714 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1713 1715 bheads = [h for h in bheads if h in fbheads]
1714 1716 if not closed:
1715 1717 bheads = [h for h in bheads if not self[h].closesbranch()]
1716 1718 return bheads
1717 1719
1718 1720 def branches(self, nodes):
1719 1721 if not nodes:
1720 1722 nodes = [self.changelog.tip()]
1721 1723 b = []
1722 1724 for n in nodes:
1723 1725 t = n
1724 1726 while True:
1725 1727 p = self.changelog.parents(n)
1726 1728 if p[1] != nullid or p[0] == nullid:
1727 1729 b.append((t, n, p[0], p[1]))
1728 1730 break
1729 1731 n = p[0]
1730 1732 return b
1731 1733
1732 1734 def between(self, pairs):
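# for each (top, bottom) pair, walk first parents from top towards
# bottom and collect nodes at exponentially growing distances
# (1, 2, 4, 8, ...); used by the legacy discovery wire protocol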
1733 1735 r = []
1734 1736
1735 1737 for top, bottom in pairs:
1736 1738 n, l, i = top, [], 0
1737 1739 f = 1
1738 1740
1739 1741 while n != bottom and n != nullid:
1740 1742 p = self.changelog.parents(n)[0]
1741 1743 if i == f:
1742 1744 l.append(n)
1743 1745 f = f * 2
1744 1746 n = p
1745 1747 i += 1
1746 1748
1747 1749 r.append(l)
1748 1750
1749 1751 return r
1750 1752
1751 1753 def pull(self, remote, heads=None, force=False):
1752 1754 # don't open a transaction for nothing or you break future useful
1753 1755 # rollback calls
1754 1756 tr = None
1755 1757 trname = 'pull\n' + util.hidepassword(remote.url())
1756 1758 lock = self.lock()
1757 1759 try:
1758 1760 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1759 1761 force=force)
1760 1762 common, fetch, rheads = tmp
1761 1763 if not fetch:
1762 1764 self.ui.status(_("no changes found\n"))
1763 1765 added = []
1764 1766 result = 0
1765 1767 else:
1766 1768 tr = self.transaction(trname)
1767 1769 if heads is None and list(common) == [nullid]:
1768 1770 self.ui.status(_("requesting all changes\n"))
1769 1771 elif heads is None and remote.capable('changegroupsubset'):
1770 1772 # issue1320, avoid a race if remote changed after discovery
1771 1773 heads = rheads
1772 1774
1773 1775 if remote.capable('getbundle'):
1774 1776 cg = remote.getbundle('pull', common=common,
1775 1777 heads=heads or rheads)
1776 1778 elif heads is None:
1777 1779 cg = remote.changegroup(fetch, 'pull')
1778 1780 elif not remote.capable('changegroupsubset'):
1779 1781 raise util.Abort(_("partial pull cannot be done because "
1780 1782 "other repository doesn't support "
1781 1783 "changegroupsubset."))
1782 1784 else:
1783 1785 cg = remote.changegroupsubset(fetch, heads, 'pull')
1784 1786 clstart = len(self.changelog)
1785 1787 result = self.addchangegroup(cg, 'pull', remote.url())
1786 1788 clend = len(self.changelog)
1787 1789 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1788 1790
1789 1791 # compute target subset
1790 1792 if heads is None:
1791 1793 # We pulled everything possible
1792 1794 # sync on everything common
1793 1795 subset = common + added
1794 1796 else:
1795 1797 # We pulled a specific subset
1796 1798 # sync on this subset
1797 1799 subset = heads
1798 1800
1799 1801 # Get remote phases data from remote
1800 1802 remotephases = remote.listkeys('phases')
1801 1803 publishing = bool(remotephases.get('publishing', False))
1802 1804 if remotephases and not publishing:
1803 1805 # remote is new and non-publishing
1804 1806 pheads, _dr = phases.analyzeremotephases(self, subset,
1805 1807 remotephases)
1806 1808 phases.advanceboundary(self, phases.public, pheads)
1807 1809 phases.advanceboundary(self, phases.draft, subset)
1808 1810 else:
1809 1811 # Remote is old or publishing; all common changesets
1810 1812 # should be seen as public
1811 1813 phases.advanceboundary(self, phases.public, subset)
1812 1814
1813 1815 if obsolete._enabled:
1814 1816 self.ui.debug('fetching remote obsolete markers\n')
1815 1817 remoteobs = remote.listkeys('obsolete')
1816 1818 if 'dump0' in remoteobs:
1817 1819 if tr is None:
1818 1820 tr = self.transaction(trname)
1819 1821 for key in sorted(remoteobs, reverse=True):
1820 1822 if key.startswith('dump'):
1821 1823 data = base85.b85decode(remoteobs[key])
1822 1824 self.obsstore.mergemarkers(tr, data)
1823 1825 if tr is not None:
1824 1826 tr.close()
1825 1827 finally:
1826 1828 if tr is not None:
1827 1829 tr.release()
1828 1830 lock.release()
1829 1831
1830 1832 return result
1831 1833
1832 1834 def checkpush(self, force, revs):
1833 1835 """Extensions can override this function if additional checks have
1834 1836 to be performed before pushing, or call it if they override push
1835 1837 command.
1836 1838 """
1837 1839 pass
1838 1840
1839 1841 def push(self, remote, force=False, revs=None, newbranch=False):
1840 1842 '''Push outgoing changesets (limited by revs) from the current
1841 1843 repository to remote. Return an integer:
1842 1844 - None means nothing to push
1843 1845 - 0 means HTTP error
1844 1846 - 1 means we pushed and remote head count is unchanged *or*
1845 1847 we have outgoing changesets but refused to push
1846 1848 - other values as described by addchangegroup()
1847 1849 '''
1848 1850 # there are two ways to push to remote repo:
1849 1851 #
1850 1852 # addchangegroup assumes local user can lock remote
1851 1853 # repo (local filesystem, old ssh servers).
1852 1854 #
1853 1855 # unbundle assumes local user cannot lock remote repo (new ssh
1854 1856 # servers, http servers).
1855 1857
1856 1858 if not remote.canpush():
1857 1859 raise util.Abort(_("destination does not support push"))
1858 1860 # get local lock as we might write phase data
1859 1861 locallock = self.lock()
1860 1862 try:
1861 1863 self.checkpush(force, revs)
1862 1864 lock = None
1863 1865 unbundle = remote.capable('unbundle')
1864 1866 if not unbundle:
1865 1867 lock = remote.lock()
1866 1868 try:
1867 1869 # discovery
1868 1870 fci = discovery.findcommonincoming
1869 1871 commoninc = fci(self, remote, force=force)
1870 1872 common, inc, remoteheads = commoninc
1871 1873 fco = discovery.findcommonoutgoing
1872 1874 outgoing = fco(self, remote, onlyheads=revs,
1873 1875 commoninc=commoninc, force=force)
1874 1876
1875 1877
1876 1878 if not outgoing.missing:
1877 1879 # nothing to push
1878 1880 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1879 1881 ret = None
1880 1882 else:
1881 1883 # something to push
1882 1884 if not force:
1883 1885 # if self.obsstore is empty --> no obsolete changesets,
1884 1886 # so we can skip the iteration
1885 1887 if self.obsstore:
1886 1888 # these messages are defined here for 80-char limit reasons
1887 1889 mso = _("push includes obsolete changeset: %s!")
1888 1890 msu = _("push includes unstable changeset: %s!")
1889 1891 msb = _("push includes bumped changeset: %s!")
1890 1892 # If we are to push and there is at least one
1891 1893 # obsolete or unstable changeset in missing, at
1892 1894 # least one of the missing heads will be obsolete or
1893 1895 # unstable. So checking heads only is ok
1894 1896 for node in outgoing.missingheads:
1895 1897 ctx = self[node]
1896 1898 if ctx.obsolete():
1897 1899 raise util.Abort(mso % ctx)
1898 1900 elif ctx.unstable():
1899 1901 raise util.Abort(msu % ctx)
1900 1902 elif ctx.bumped():
1901 1903 raise util.Abort(msb % ctx)
1902 1904 discovery.checkheads(self, remote, outgoing,
1903 1905 remoteheads, newbranch,
1904 1906 bool(inc))
1905 1907
1906 1908 # create a changegroup from local
1907 1909 if revs is None and not outgoing.excluded:
1908 1910 # push everything,
1909 1911 # use the fast path, no race possible on push
1910 1912 cg = self._changegroup(outgoing.missing, 'push')
1911 1913 else:
1912 1914 cg = self.getlocalbundle('push', outgoing)
1913 1915
1914 1916 # apply changegroup to remote
1915 1917 if unbundle:
1916 1918 # local repo finds heads on server, finds out what
1917 1919 # revs it must push. once revs transferred, if server
1918 1920 # finds it has different heads (someone else won
1919 1921 # commit/push race), server aborts.
1920 1922 if force:
1921 1923 remoteheads = ['force']
1922 1924 # ssh: return remote's addchangegroup()
1923 1925 # http: return remote's addchangegroup() or 0 for error
1924 1926 ret = remote.unbundle(cg, remoteheads, 'push')
1925 1927 else:
1926 1928 # we return an integer indicating remote head count
1927 1929 # change
1928 1930 ret = remote.addchangegroup(cg, 'push', self.url())
1929 1931
1930 1932 if ret:
1931 1933 # push succeeded, synchronize the target of the push
1932 1934 cheads = outgoing.missingheads
1933 1935 elif revs is None:
1934 1936 # All-out push failed; synchronize all common
1935 1937 cheads = outgoing.commonheads
1936 1938 else:
1937 1939 # I want cheads = heads(::missingheads and ::commonheads)
1938 1940 # (missingheads is revs with secret changesets filtered out)
1939 1941 #
1940 1942 # This can be expressed as:
1941 1943 # cheads = ( (missingheads and ::commonheads)
1942 1944 # + (commonheads and ::missingheads)
1943 1945 # )
1944 1946 #
1945 1947 # while trying to push we already computed the following:
1946 1948 # common = (::commonheads)
1947 1949 # missing = ((commonheads::missingheads) - commonheads)
1948 1950 #
1949 1951 # We can pick:
1950 1952 # * missingheads part of common (::commonheads)
1951 1953 common = set(outgoing.common)
1952 1954 cheads = [node for node in revs if node in common]
1953 1955 # and
1954 1956 # * commonheads that are parents of roots of missing
1955 1957 revset = self.set('%ln and parents(roots(%ln))',
1956 1958 outgoing.commonheads,
1957 1959 outgoing.missing)
1958 1960 cheads.extend(c.node() for c in revset)
1959 1961 # even when we don't push, exchanging phase data is useful
1960 1962 remotephases = remote.listkeys('phases')
1961 1963 if not remotephases: # old server or public only repo
1962 1964 phases.advanceboundary(self, phases.public, cheads)
1963 1965 # don't push any phase data as there is nothing to push
1964 1966 else:
1965 1967 ana = phases.analyzeremotephases(self, cheads, remotephases)
1966 1968 pheads, droots = ana
1967 1969 ### Apply remote phase on local
1968 1970 if remotephases.get('publishing', False):
1969 1971 phases.advanceboundary(self, phases.public, cheads)
1970 1972 else: # publish = False
1971 1973 phases.advanceboundary(self, phases.public, pheads)
1972 1974 phases.advanceboundary(self, phases.draft, cheads)
1973 1975 ### Apply local phase on remote
1974 1976
1975 1977 # Get the list of all revs draft on remote but public here.
1976 1978 # XXX Beware that the revset breaks if droots is not strictly
1977 1979 # XXX roots; we may want to ensure it is, but that is costly
1978 1980 outdated = self.set('heads((%ln::%ln) and public())',
1979 1981 droots, cheads)
1980 1982 for newremotehead in outdated:
1981 1983 r = remote.pushkey('phases',
1982 1984 newremotehead.hex(),
1983 1985 str(phases.draft),
1984 1986 str(phases.public))
1985 1987 if not r:
1986 1988 self.ui.warn(_('updating %s to public failed!\n')
1987 1989 % newremotehead)
1988 1990 self.ui.debug('try to push obsolete markers to remote\n')
1989 1991 if (obsolete._enabled and self.obsstore and
1990 1992 'obsolete' in remote.listkeys('namespaces')):
1991 1993 rslts = []
1992 1994 remotedata = self.listkeys('obsolete')
1993 1995 for key in sorted(remotedata, reverse=True):
1994 1996 # reverse sort to ensure we end with dump0
1995 1997 data = remotedata[key]
1996 1998 rslts.append(remote.pushkey('obsolete', key, '', data))
1997 1999 if [r for r in rslts if not r]:
1998 2000 msg = _('failed to push some obsolete markers!\n')
1999 2001 self.ui.warn(msg)
2000 2002 finally:
2001 2003 if lock is not None:
2002 2004 lock.release()
2003 2005 finally:
2004 2006 locallock.release()
2005 2007
2006 2008 self.ui.debug("checking for updated bookmarks\n")
2007 2009 rb = remote.listkeys('bookmarks')
2008 2010 for k in rb.keys():
2009 2011 if k in self._bookmarks:
2010 2012 nr, nl = rb[k], hex(self._bookmarks[k])
2011 2013 if nr in self:
2012 2014 cr = self[nr]
2013 2015 cl = self[nl]
2014 2016 if bookmarks.validdest(self, cr, cl):
2015 2017 r = remote.pushkey('bookmarks', k, nr, nl)
2016 2018 if r:
2017 2019 self.ui.status(_("updating bookmark %s\n") % k)
2018 2020 else:
2019 2021 self.ui.warn(_('updating bookmark %s'
2020 2022 ' failed!\n') % k)
2021 2023
2022 2024 return ret
2023 2025
2024 2026 def changegroupinfo(self, nodes, source):
2025 2027 if self.ui.verbose or source == 'bundle':
2026 2028 self.ui.status(_("%d changesets found\n") % len(nodes))
2027 2029 if self.ui.debugflag:
2028 2030 self.ui.debug("list of changesets:\n")
2029 2031 for node in nodes:
2030 2032 self.ui.debug("%s\n" % hex(node))
2031 2033
2032 2034 def changegroupsubset(self, bases, heads, source):
2033 2035 """Compute a changegroup consisting of all the nodes that are
2034 2036 descendants of any of the bases and ancestors of any of the heads.
2035 2037 Return a chunkbuffer object whose read() method will return
2036 2038 successive changegroup chunks.
2037 2039
2038 2040 It is fairly complex as determining which filenodes and which
2039 2041 manifest nodes need to be included for the changeset to be complete
2040 2042 is non-trivial.
2041 2043
2042 2044 Another wrinkle is doing the reverse, figuring out which changeset in
2043 2045 the changegroup a particular filenode or manifestnode belongs to.
2044 2046 """
2045 2047 cl = self.changelog
2046 2048 if not bases:
2047 2049 bases = [nullid]
2048 2050 csets, bases, heads = cl.nodesbetween(bases, heads)
2049 2051 # We assume that all ancestors of bases are known
2050 2052 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2051 2053 return self._changegroupsubset(common, csets, heads, source)
2052 2054
2053 2055 def getlocalbundle(self, source, outgoing):
2054 2056 """Like getbundle, but taking a discovery.outgoing as an argument.
2055 2057
2056 2058 This is only implemented for local repos and reuses potentially
2057 2059 precomputed sets in outgoing."""
2058 2060 if not outgoing.missing:
2059 2061 return None
2060 2062 return self._changegroupsubset(outgoing.common,
2061 2063 outgoing.missing,
2062 2064 outgoing.missingheads,
2063 2065 source)
2064 2066
2065 2067 def getbundle(self, source, heads=None, common=None):
2066 2068 """Like changegroupsubset, but returns the set difference between the
2067 2069 ancestors of heads and the ancestors common.
2068 2070
2069 2071 If heads is None, use the local heads. If common is None, use [nullid].
2070 2072
2071 2073 The nodes in common might not all be known locally due to the way the
2072 2074 current discovery protocol works.
2073 2075 """
2074 2076 cl = self.changelog
2075 2077 if common:
2076 2078 nm = cl.nodemap
2077 2079 common = [n for n in common if n in nm]
2078 2080 else:
2079 2081 common = [nullid]
2080 2082 if not heads:
2081 2083 heads = cl.heads()
2082 2084 return self.getlocalbundle(source,
2083 2085 discovery.outgoing(cl, common, heads))
2084 2086
2085 2087 def _changegroupsubset(self, commonrevs, csets, heads, source):
2086 2088
2087 2089 cl = self.changelog
2088 2090 mf = self.manifest
2089 2091 mfs = {} # needed manifests
2090 2092 fnodes = {} # needed file nodes
2091 2093 changedfiles = set()
2092 2094 fstate = ['', {}]
2093 2095 count = [0, 0]
2094 2096
2095 2097 # can we go through the fast path?
2096 2098 heads.sort()
2097 2099 if heads == sorted(self.heads()):
2098 2100 return self._changegroup(csets, source)
2099 2101
2100 2102 # slow path
2101 2103 self.hook('preoutgoing', throw=True, source=source)
2102 2104 self.changegroupinfo(csets, source)
2103 2105
2104 2106 # filter any nodes that claim to be part of the known set
2105 2107 def prune(revlog, missing):
2106 2108 rr, rl = revlog.rev, revlog.linkrev
2107 2109 return [n for n in missing
2108 2110 if rl(rr(n)) not in commonrevs]
2109 2111
2110 2112 progress = self.ui.progress
2111 2113 _bundling = _('bundling')
2112 2114 _changesets = _('changesets')
2113 2115 _manifests = _('manifests')
2114 2116 _files = _('files')
2115 2117
2116 2118 def lookup(revlog, x):
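# bundler callback: map a revision of the given revlog to the
# changeset node it is linked to, collecting along the way the
# manifests and file nodes that still need to be bundled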
2117 2119 if revlog == cl:
2118 2120 c = cl.read(x)
2119 2121 changedfiles.update(c[3])
2120 2122 mfs.setdefault(c[0], x)
2121 2123 count[0] += 1
2122 2124 progress(_bundling, count[0],
2123 2125 unit=_changesets, total=count[1])
2124 2126 return x
2125 2127 elif revlog == mf:
2126 2128 clnode = mfs[x]
2127 2129 mdata = mf.readfast(x)
2128 2130 for f, n in mdata.iteritems():
2129 2131 if f in changedfiles:
2130 2132 fnodes[f].setdefault(n, clnode)
2131 2133 count[0] += 1
2132 2134 progress(_bundling, count[0],
2133 2135 unit=_manifests, total=count[1])
2134 2136 return clnode
2135 2137 else:
2136 2138 progress(_bundling, count[0], item=fstate[0],
2137 2139 unit=_files, total=count[1])
2138 2140 return fstate[1][x]
2139 2141
2140 2142 bundler = changegroup.bundle10(lookup)
2141 2143 reorder = self.ui.config('bundle', 'reorder', 'auto')
2142 2144 if reorder == 'auto':
2143 2145 reorder = None
2144 2146 else:
2145 2147 reorder = util.parsebool(reorder)
2146 2148
2147 2149 def gengroup():
2148 2150 # Create a changenode group generator that will call our functions
2149 2151 # back to lookup the owning changenode and collect information.
2150 2152 count[:] = [0, len(csets)]
2151 2153 for chunk in cl.group(csets, bundler, reorder=reorder):
2152 2154 yield chunk
2153 2155 progress(_bundling, None)
2154 2156
2155 2157 # Create a generator for the manifestnodes that calls our lookup
2156 2158 # and data collection functions back.
2157 2159 for f in changedfiles:
2158 2160 fnodes[f] = {}
2159 2161 count[:] = [0, len(mfs)]
2160 2162 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2161 2163 yield chunk
2162 2164 progress(_bundling, None)
2163 2165
2164 2166 mfs.clear()
2165 2167
2166 2168 # Go through all our files in order sorted by name.
2167 2169 count[:] = [0, len(changedfiles)]
2168 2170 for fname in sorted(changedfiles):
2169 2171 filerevlog = self.file(fname)
2170 2172 if not len(filerevlog):
2171 2173 raise util.Abort(_("empty or missing revlog for %s")
2172 2174 % fname)
2173 2175 fstate[0] = fname
2174 2176 fstate[1] = fnodes.pop(fname, {})
2175 2177
2176 2178 nodelist = prune(filerevlog, fstate[1])
2177 2179 if nodelist:
2178 2180 count[0] += 1
2179 2181 yield bundler.fileheader(fname)
2180 2182 for chunk in filerevlog.group(nodelist, bundler, reorder):
2181 2183 yield chunk
2182 2184
2183 2185 # Signal that no more groups are left.
2184 2186 yield bundler.close()
2185 2187 progress(_bundling, None)
2186 2188
2187 2189 if csets:
2188 2190 self.hook('outgoing', node=hex(csets[0]), source=source)
2189 2191
2190 2192 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2191 2193
2192 2194 def changegroup(self, basenodes, source):
2193 2195 # to avoid a race we use changegroupsubset() (issue1320)
2194 2196 return self.changegroupsubset(basenodes, self.heads(), source)
2195 2197
2196 2198 def _changegroup(self, nodes, source):
2197 2199 """Compute the changegroup of all nodes that we have that a recipient
2198 2200 doesn't. Return a chunkbuffer object whose read() method will return
2199 2201 successive changegroup chunks.
2200 2202
2201 2203 This is much easier than the previous function as we can assume that
2202 2204 the recipient has any changenode we aren't sending them.
2203 2205
2204 2206 nodes is the set of nodes to send"""
2205 2207
2206 2208 cl = self.changelog
2207 2209 mf = self.manifest
2208 2210 mfs = {}
2209 2211 changedfiles = set()
2210 2212 fstate = ['']
2211 2213 count = [0, 0]
2212 2214
2213 2215 self.hook('preoutgoing', throw=True, source=source)
2214 2216 self.changegroupinfo(nodes, source)
2215 2217
2216 2218 revset = set([cl.rev(n) for n in nodes])
2217 2219
2218 2220 def gennodelst(log):
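# return the nodes of 'log' whose linkrev points at one of the
# changesets being bundled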
2219 2221 ln, llr = log.node, log.linkrev
2220 2222 return [ln(r) for r in log if llr(r) in revset]
2221 2223
2222 2224 progress = self.ui.progress
2223 2225 _bundling = _('bundling')
2224 2226 _changesets = _('changesets')
2225 2227 _manifests = _('manifests')
2226 2228 _files = _('files')
2227 2229
2228 2230 def lookup(revlog, x):
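# bundler callback: every revision simply links back to the changeset
# named by its linkrev; no extra bookkeeping is needed here because the
# recipient is assumed to have all changesets we are not sending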
2229 2231 if revlog == cl:
2230 2232 c = cl.read(x)
2231 2233 changedfiles.update(c[3])
2232 2234 mfs.setdefault(c[0], x)
2233 2235 count[0] += 1
2234 2236 progress(_bundling, count[0],
2235 2237 unit=_changesets, total=count[1])
2236 2238 return x
2237 2239 elif revlog == mf:
2238 2240 count[0] += 1
2239 2241 progress(_bundling, count[0],
2240 2242 unit=_manifests, total=count[1])
2241 2243 return cl.node(revlog.linkrev(revlog.rev(x)))
2242 2244 else:
2243 2245 progress(_bundling, count[0], item=fstate[0],
2244 2246 total=count[1], unit=_files)
2245 2247 return cl.node(revlog.linkrev(revlog.rev(x)))
2246 2248
2247 2249 bundler = changegroup.bundle10(lookup)
2248 2250 reorder = self.ui.config('bundle', 'reorder', 'auto')
2249 2251 if reorder == 'auto':
2250 2252 reorder = None
2251 2253 else:
2252 2254 reorder = util.parsebool(reorder)
2253 2255
2254 2256 def gengroup():
2255 2257 '''yield a sequence of changegroup chunks (strings)'''
2256 2258 # construct a list of all changed files
2257 2259
2258 2260 count[:] = [0, len(nodes)]
2259 2261 for chunk in cl.group(nodes, bundler, reorder=reorder):
2260 2262 yield chunk
2261 2263 progress(_bundling, None)
2262 2264
2263 2265 count[:] = [0, len(mfs)]
2264 2266 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2265 2267 yield chunk
2266 2268 progress(_bundling, None)
2267 2269
2268 2270 count[:] = [0, len(changedfiles)]
2269 2271 for fname in sorted(changedfiles):
2270 2272 filerevlog = self.file(fname)
2271 2273 if not len(filerevlog):
2272 2274 raise util.Abort(_("empty or missing revlog for %s")
2273 2275 % fname)
2274 2276 fstate[0] = fname
2275 2277 nodelist = gennodelst(filerevlog)
2276 2278 if nodelist:
2277 2279 count[0] += 1
2278 2280 yield bundler.fileheader(fname)
2279 2281 for chunk in filerevlog.group(nodelist, bundler, reorder):
2280 2282 yield chunk
2281 2283 yield bundler.close()
2282 2284 progress(_bundling, None)
2283 2285
2284 2286 if nodes:
2285 2287 self.hook('outgoing', node=hex(nodes[0]), source=source)
2286 2288
2287 2289 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2288 2290
2289 2291 def addchangegroup(self, source, srctype, url, emptyok=False):
2290 2292 """Add the changegroup returned by source.read() to this repo.
2291 2293 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2292 2294 the URL of the repo where this changegroup is coming from.
2293 2295
2294 2296 Return an integer summarizing the change to this repo:
2295 2297 - nothing changed or no source: 0
2296 2298 - more heads than before: 1+added heads (2..n)
2297 2299 - fewer heads than before: -1-removed heads (-2..-n)
2298 2300 - number of heads stays the same: 1
2299 2301 """
2300 2302 def csmap(x):
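# linkrev mapper for the incoming changelog group: debug-print each
# changeset and link it to the rev it is about to occupy (the current
# changelog length)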
2301 2303 self.ui.debug("add changeset %s\n" % short(x))
2302 2304 return len(cl)
2303 2305
2304 2306 def revmap(x):
2305 2307 return cl.rev(x)
2306 2308
2307 2309 if not source:
2308 2310 return 0
2309 2311
2310 2312 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2311 2313
2312 2314 changesets = files = revisions = 0
2313 2315 efiles = set()
2314 2316
2315 2317 # write changelog data to temp files so concurrent readers will not see
2316 2318 # an inconsistent view
2317 2319 cl = self.changelog
2318 2320 cl.delayupdate()
2319 2321 oldheads = cl.heads()
2320 2322
2321 2323 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2322 2324 try:
2323 2325 trp = weakref.proxy(tr)
2324 2326 # pull off the changeset group
2325 2327 self.ui.status(_("adding changesets\n"))
2326 2328 clstart = len(cl)
2327 2329 class prog(object):
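# progress callback handed to the unbundler: each call advances the
# chunk counter of the current step (changesets, manifests, files)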
2328 2330 step = _('changesets')
2329 2331 count = 1
2330 2332 ui = self.ui
2331 2333 total = None
2332 2334 def __call__(self):
2333 2335 self.ui.progress(self.step, self.count, unit=_('chunks'),
2334 2336 total=self.total)
2335 2337 self.count += 1
2336 2338 pr = prog()
2337 2339 source.callback = pr
2338 2340
2339 2341 source.changelogheader()
2340 2342 srccontent = cl.addgroup(source, csmap, trp)
2341 2343 if not (srccontent or emptyok):
2342 2344 raise util.Abort(_("received changelog group is empty"))
2343 2345 clend = len(cl)
2344 2346 changesets = clend - clstart
2345 2347 for c in xrange(clstart, clend):
2346 2348 efiles.update(self[c].files())
2347 2349 efiles = len(efiles)
2348 2350 self.ui.progress(_('changesets'), None)
2349 2351
2350 2352 # pull off the manifest group
2351 2353 self.ui.status(_("adding manifests\n"))
2352 2354 pr.step = _('manifests')
2353 2355 pr.count = 1
2354 2356 pr.total = changesets # manifests <= changesets
2355 2357 # no need to check for empty manifest group here:
2356 2358 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2357 2359 # no new manifest will be created and the manifest group will
2358 2360 # be empty during the pull
2359 2361 source.manifestheader()
2360 2362 self.manifest.addgroup(source, revmap, trp)
2361 2363 self.ui.progress(_('manifests'), None)
2362 2364
2363 2365 needfiles = {}
2364 2366 if self.ui.configbool('server', 'validate', default=False):
2365 2367 # validate incoming csets have their manifests
2366 2368 for cset in xrange(clstart, clend):
2367 2369 mfest = self.changelog.read(self.changelog.node(cset))[0]
2368 2370 mfest = self.manifest.readdelta(mfest)
2369 2371 # store file nodes we must see
2370 2372 for f, n in mfest.iteritems():
2371 2373 needfiles.setdefault(f, set()).add(n)
2372 2374
2373 2375 # process the files
2374 2376 self.ui.status(_("adding file changes\n"))
2375 2377 pr.step = _('files')
2376 2378 pr.count = 1
2377 2379 pr.total = efiles
2378 2380 source.callback = None
2379 2381
2380 2382 while True:
2381 2383 chunkdata = source.filelogheader()
2382 2384 if not chunkdata:
2383 2385 break
2384 2386 f = chunkdata["filename"]
2385 2387 self.ui.debug("adding %s revisions\n" % f)
2386 2388 pr()
2387 2389 fl = self.file(f)
2388 2390 o = len(fl)
2389 2391 if not fl.addgroup(source, revmap, trp):
2390 2392 raise util.Abort(_("received file revlog group is empty"))
2391 2393 revisions += len(fl) - o
2392 2394 files += 1
2393 2395 if f in needfiles:
2394 2396 needs = needfiles[f]
2395 2397 for new in xrange(o, len(fl)):
2396 2398 n = fl.node(new)
2397 2399 if n in needs:
2398 2400 needs.remove(n)
2399 2401 if not needs:
2400 2402 del needfiles[f]
2401 2403 self.ui.progress(_('files'), None)
2402 2404
2403 2405 for f, needs in needfiles.iteritems():
2404 2406 fl = self.file(f)
2405 2407 for n in needs:
2406 2408 try:
2407 2409 fl.rev(n)
2408 2410 except error.LookupError:
2409 2411 raise util.Abort(
2410 2412 _('missing file data for %s:%s - run hg verify') %
2411 2413 (f, hex(n)))
2412 2414
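# compute the head count delta; new heads that close their branch are
# not counted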
2413 2415 dh = 0
2414 2416 if oldheads:
2415 2417 heads = cl.heads()
2416 2418 dh = len(heads) - len(oldheads)
2417 2419 for h in heads:
2418 2420 if h not in oldheads and self[h].closesbranch():
2419 2421 dh -= 1
2420 2422 htext = ""
2421 2423 if dh:
2422 2424 htext = _(" (%+d heads)") % dh
2423 2425
2424 2426 self.ui.status(_("added %d changesets"
2425 2427 " with %d changes to %d files%s\n")
2426 2428 % (changesets, revisions, files, htext))
2427 2429 obsolete.clearobscaches(self)
2428 2430
2429 2431 if changesets > 0:
2430 2432 p = lambda: cl.writepending() and self.root or ""
2431 2433 self.hook('pretxnchangegroup', throw=True,
2432 2434 node=hex(cl.node(clstart)), source=srctype,
2433 2435 url=url, pending=p)
2434 2436
2435 2437 added = [cl.node(r) for r in xrange(clstart, clend)]
2436 2438 publishing = self.ui.configbool('phases', 'publish', True)
2437 2439 if srctype == 'push':
2438 2440 # Old servers can not push the boundary themselves.
2439 2441 # New servers won't push the boundary if the changeset already
2440 2442 # existed locally as secret
2441 2443 #
2442 2444 # We should not use added here but the list of all changes in
2443 2445 # the bundle
2444 2446 if publishing:
2445 2447 phases.advanceboundary(self, phases.public, srccontent)
2446 2448 else:
2447 2449 phases.advanceboundary(self, phases.draft, srccontent)
2448 2450 phases.retractboundary(self, phases.draft, added)
2449 2451 elif srctype != 'strip':
2450 2452 # publishing only alters behavior during push
2451 2453 #
2452 2454 # strip should not touch boundary at all
2453 2455 phases.retractboundary(self, phases.draft, added)
2454 2456
2455 2457 # make changelog see real files again
2456 2458 cl.finalize(trp)
2457 2459
2458 2460 tr.close()
2459 2461
2460 2462 if changesets > 0:
2461 2463 self.updatebranchcache()
2462 2464 def runhooks():
2463 2465 # forcefully update the on-disk branch cache
2464 2466 self.ui.debug("updating the branch cache\n")
2465 2467 self.hook("changegroup", node=hex(cl.node(clstart)),
2466 2468 source=srctype, url=url)
2467 2469
2468 2470 for n in added:
2469 2471 self.hook("incoming", node=hex(n), source=srctype,
2470 2472 url=url)
2471 2473 self._afterlock(runhooks)
2472 2474
2473 2475 finally:
2474 2476 tr.release()
2475 2477 # never return 0 here:
2476 2478 if dh < 0:
2477 2479 return dh - 1
2478 2480 else:
2479 2481 return dh + 1
2480 2482
2481 2483 def stream_in(self, remote, requirements):
2482 2484 lock = self.lock()
2483 2485 try:
2484 2486 # Save remote branchmap. We will use it later
2485 2487 # to speed up branchcache creation
2486 2488 rbranchmap = None
2487 2489 if remote.capable("branchmap"):
2488 2490 rbranchmap = remote.branchmap()
2489 2491
2490 2492 fp = remote.stream_out()
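# the stream starts with a status line: 0 means success, 1 means the
# operation is forbidden by the server, 2 means locking the remote
# repository failed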
2491 2493 l = fp.readline()
2492 2494 try:
2493 2495 resp = int(l)
2494 2496 except ValueError:
2495 2497 raise error.ResponseError(
2496 2498 _('unexpected response from remote server:'), l)
2497 2499 if resp == 1:
2498 2500 raise util.Abort(_('operation forbidden by server'))
2499 2501 elif resp == 2:
2500 2502 raise util.Abort(_('locking the remote repository failed'))
2501 2503 elif resp != 0:
2502 2504 raise util.Abort(_('the server sent an unknown error code'))
2503 2505 self.ui.status(_('streaming all changes\n'))
2504 2506 l = fp.readline()
2505 2507 try:
2506 2508 total_files, total_bytes = map(int, l.split(' ', 1))
2507 2509 except (ValueError, TypeError):
2508 2510 raise error.ResponseError(
2509 2511 _('unexpected response from remote server:'), l)
2510 2512 self.ui.status(_('%d files to transfer, %s of data\n') %
2511 2513 (total_files, util.bytecount(total_bytes)))
2512 2514 handled_bytes = 0
2513 2515 self.ui.progress(_('clone'), 0, total=total_bytes)
2514 2516 start = time.time()
2515 2517 for i in xrange(total_files):
2516 2518 # XXX doesn't support '\n' or '\r' in filenames
2517 2519 l = fp.readline()
2518 2520 try:
2519 2521 name, size = l.split('\0', 1)
2520 2522 size = int(size)
2521 2523 except (ValueError, TypeError):
2522 2524 raise error.ResponseError(
2523 2525 _('unexpected response from remote server:'), l)
2524 2526 if self.ui.debugflag:
2525 2527 self.ui.debug('adding %s (%s)\n' %
2526 2528 (name, util.bytecount(size)))
2527 2529 # for backwards compat, name was partially encoded
2528 2530 ofp = self.sopener(store.decodedir(name), 'w')
2529 2531 for chunk in util.filechunkiter(fp, limit=size):
2530 2532 handled_bytes += len(chunk)
2531 2533 self.ui.progress(_('clone'), handled_bytes,
2532 2534 total=total_bytes)
2533 2535 ofp.write(chunk)
2534 2536 ofp.close()
2535 2537 elapsed = time.time() - start
2536 2538 if elapsed <= 0:
2537 2539 elapsed = 0.001
2538 2540 self.ui.progress(_('clone'), None)
2539 2541 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2540 2542 (util.bytecount(total_bytes), elapsed,
2541 2543 util.bytecount(total_bytes / elapsed)))
2542 2544
2543 2545 # new requirements = old non-format requirements +
2544 2546 # new format-related
2545 2547 # requirements from the streamed-in repository
2546 2548 requirements.update(set(self.requirements) - self.supportedformats)
2547 2549 self._applyrequirements(requirements)
2548 2550 self._writerequirements()
2549 2551
2550 2552 if rbranchmap:
2551 2553 rbheads = []
2552 2554 for bheads in rbranchmap.itervalues():
2553 2555 rbheads.extend(bheads)
2554 2556
2555 2557 self.branchcache = rbranchmap
2556 2558 if rbheads:
2557 2559 rtiprev = max((int(self.changelog.rev(node))
2558 2560 for node in rbheads))
2559 2561 self._writebranchcache(self.branchcache,
2560 2562 self[rtiprev].node(), rtiprev)
2561 2563 self.invalidate()
2562 2564 return len(self.heads()) + 1
2563 2565 finally:
2564 2566 lock.release()
2565 2567
2566 2568 def clone(self, remote, heads=[], stream=False):
2567 2569 '''clone remote repository.
2568 2570
2569 2571 keyword arguments:
2570 2572 heads: list of revs to clone (forces use of pull)
2571 2573 stream: use streaming clone if possible'''
2572 2574
2573 2575 # now, all clients that can request uncompressed clones can
2574 2576 # read repo formats supported by all servers that can serve
2575 2577 # them.
2576 2578
2577 2579 # if revlog format changes, client will have to check version
2578 2580 # and format flags on "stream" capability, and use
2579 2581 # uncompressed only if compatible.
2580 2582
2581 2583 if not stream:
2582 2584 # if the server explicitly prefers to stream (for fast LANs)
2583 2585 stream = remote.capable('stream-preferred')
2584 2586
2585 2587 if stream and not heads:
2586 2588 # 'stream' means remote revlog format is revlogv1 only
2587 2589 if remote.capable('stream'):
2588 2590 return self.stream_in(remote, set(('revlogv1',)))
2589 2591 # otherwise, 'streamreqs' contains the remote revlog format
2590 2592 streamreqs = remote.capable('streamreqs')
2591 2593 if streamreqs:
2592 2594 streamreqs = set(streamreqs.split(','))
2593 2595 # if we support it, stream in and adjust our requirements
2594 2596 if not streamreqs - self.supportedformats:
2595 2597 return self.stream_in(remote, streamreqs)
2596 2598 return self.pull(remote, heads)
2597 2599
2598 2600 def pushkey(self, namespace, key, old, new):
2599 2601 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2600 2602 old=old, new=new)
2601 2603 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2602 2604 ret = pushkey.push(self, namespace, key, old, new)
2603 2605 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2604 2606 ret=ret)
2605 2607 return ret
2606 2608
2607 2609 def listkeys(self, namespace):
2608 2610 self.hook('prelistkeys', throw=True, namespace=namespace)
2609 2611 self.ui.debug('listing keys for "%s"\n' % namespace)
2610 2612 values = pushkey.list(self, namespace)
2611 2613 self.hook('listkeys', namespace=namespace, values=values)
2612 2614 return values
2613 2615
2614 2616 def debugwireargs(self, one, two, three=None, four=None, five=None):
2615 2617 '''used to test argument passing over the wire'''
2616 2618 return "%s %s %s %s %s" % (one, two, three, four, five)
2617 2619
2618 2620 def savecommitmessage(self, text):
2619 2621 fp = self.opener('last-message.txt', 'wb')
2620 2622 try:
2621 2623 fp.write(text)
2622 2624 finally:
2623 2625 fp.close()
2624 2626 return self.pathto(fp.name[len(self.root)+1:])
2625 2627
2626 2628 # used to avoid circular references so destructors work
2627 2629 def aftertrans(files):
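# return a callback renaming each (src, dest) pair; typically used when
# a transaction closes to turn journal files into their undo counterparts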
2628 2630 renamefiles = [tuple(t) for t in files]
2629 2631 def a():
2630 2632 for src, dest in renamefiles:
2631 2633 try:
2632 2634 util.rename(src, dest)
2633 2635 except OSError: # journal file does not yet exist
2634 2636 pass
2635 2637 return a
2636 2638
2637 2639 def undoname(fn):
2638 2640 base, name = os.path.split(fn)
2639 2641 assert name.startswith('journal')
2640 2642 return os.path.join(base, name.replace('journal', 'undo', 1))
2641 2643
2642 2644 def instance(ui, path, create):
2643 2645 return localrepository(ui, util.urllocalpath(path), create)
2644 2646
2645 2647 def islocal(path):
2646 2648 return True