# END OF EJS Templates
# localrepo: clear the filecache on _rollback() and destroyed()
# Idan Kamara - r17324:9f94358f (stable)
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache
20 20
class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # Cached files live under .hg/store, so resolve with sjoin
        # (store join) rather than the plain .hg join.
        return obj.sjoin(fname)

# Capabilities advertised by localpeer (the most recent API) ...
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
# ... plus the legacy changegroupsubset call, used by locallegacypeer.
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        # Let the repository (and extensions) trim the advertised set.
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # A local peer can hand out the underlying repository directly.
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        # Only expose heads that are visible through the discovery layer.
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    # The methods below simply delegate the pre-getbundle wire protocol
    # calls to the wrapped repository.

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)
107 107
class localrepository(object):

    # Requirements this class can handle: store-format requirements
    # (shared with peers) plus purely local ones.
    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    # Requirements forwarded to the store opener as options.
    openerreqs = set(('revlogv1', 'generaldelta'))
    # Requirements written into newly created repositories.
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        # Copy so callers may append without mutating the class attribute.
        return self.requirements[:]
118 118
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at path."""
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc: proceed with the base configuration only
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # old repository without a requires file
                requirements = set()

        self.sharedpath = self.path
        try:
            # a shared repo keeps its store inside another repo's .hg
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
207 207
    def close(self):
        # Nothing to release for a purely local repository.
        pass

    def _restrictcapabilities(self, caps):
        # Hook point for subclasses/extensions to trim peer capabilities.
        return caps

    def _applyrequirements(self, requirements):
        """Record requirements and derive store-opener options from them."""
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
224 224
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        Returns True only if path corresponds to a subrepository
        registered in the working copy's .hgsub state.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # the path itself is a registered subrepo
                    return True
                else:
                    # path is inside a subrepo; ask it to validate the rest
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk up one directory level and retry
                parts.pop()
        return False
262 262
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        # Map of bookmark name -> node; reloaded when .hg/bookmarks changes.
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # The currently active bookmark, read from .hg/bookmarks.current.
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        # NOTE(review): 'marks' is unused here; bookmarks.write appears to
        # read the marks from the repo itself -- confirm against bookmarks.py.
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        """Return the nodes of all variants of bookmark.

        Variants share the portion of the name before '@'
        (e.g. 'foo' and 'foo@remote').
        """
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads
284 284
    @storecache('phaseroots')
    def _phasecache(self):
        # Phase data; invalidated when .hg/store/phaseroots changes.
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by command and tools

        This set is carried on the repo to ease initialisation and lazy
        loading it'll probably move back to changelog for efficienty and
        consistency reason

        Note that the hiddenrevs will needs invalidations when
        - a new changesets is added (possible unstable above extinct)
        - a new obsolete marker is added (possible new extinct changeset)
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changeset that are not accessible by any mean
            hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
            hidden.update(self.revs(hiddenquery))
        return hidden
316 316
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a transaction is in progress in this process tree: also read
            # the not-yet-committed revisions from the pending file
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        # 'warned' is a one-element list so the closure can mutate it
        # (Python 2 has no 'nonlocal').
        warned = [0]
        def validate(node):
            # Map unknown working-directory parents to nullid, warning once.
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
345 345
    def __getitem__(self, changeid):
        # repo[None] is the working directory context.
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # A repository object is always truthy, even when empty.
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        # Iterate over all revision numbers, oldest first.
        for i in xrange(len(self)):
            yield i

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)
386 386
    # Characters that may not appear in a tag name.
    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        """Do the actual work of tagging node with the given names.

        With local=True, write to .hg/localtags; otherwise append to the
        tracked .hgtags file and commit the change.  Returns the node of
        the tagging changeset (or None for local tags).
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append tag lines, ensuring the previous content ends with \n.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # re-tagging: record the old node first so history of
                    # the tag is preserved in the file
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
461 461
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit a tag over uncommitted .hgtags changes
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
491 491
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, filled lazily by tagslist()/nodetags().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        for k, v in self._tagscache.tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
526 526
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            # build lazily on first use, then memoize on the cache object
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # build the reverse map (node -> sorted tag names) lazily
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
592 592
593 593 def nodebookmarks(self, node):
594 594 marks = []
595 595 for bookmark, n in self._bookmarks.iteritems():
596 596 if n == node:
597 597 marks.append(bookmark)
598 598 return sorted(marks)
599 599
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the branch-head cache 'partial' (valid up to rev lrev)
        # up to the current tip and write it back to disk.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
609 609
    def updatebranchcache(self):
        """Ensure self._branchcache reflects the current changelog tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already current
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (e.g. after a strip): reload from disk
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        # prefer the tipmost open head; fall back to the tipmost head
        # overall if every head closes the branch
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt
654 654
    def _readbranchcache(self):
        """Read .hg/cache/branchheads.

        Returns (partial, last, lrev) where partial maps branch name to a
        list of head nodes, and last/lrev identify the tip the cache was
        valid for.  Returns an empty cache on any read or validation error.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                # remaining lines: "<head hex> <branch label>"
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if not node in self:
                    raise ValueError('invalidating branch cache because node '+
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # any malformed cache is simply discarded
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
686 686
    def _writebranchcache(self, branches, tip, tiprev):
        """Write the branch-head cache; failures are silently ignored
        since the cache can always be rebuilt from the changelog."""
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
697 697
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]
757 757
    def lookup(self, key):
        """Resolve a revision identifier to a node."""
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        """Return the branch 'key' names: key itself if it is a branch
        name, otherwise the branch of the revision it identifies."""
        repo = remote or self
        if key in repo.branchmap():
            return key

        # fall back to resolving key as a revision (locally if possible)
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        """Return a list of booleans: whether each node is known here
        and not secret."""
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works
784 784
    def join(self, f):
        # Path of f inside .hg/.
        return os.path.join(self.path, f)

    def wjoin(self, f):
        # Path of f inside the working directory.
        return os.path.join(self.root, f)

    def file(self, f):
        """Return the filelog for tracked file f (leading '/' ignored)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # Open f relative to the working directory.
        return self.wopener(f, mode)

    def _link(self, f):
        # Is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
830 830
    def _loadfilter(self, filter):
        """Build (and memoize) the list of (matcher, fn, params) triples
        for the given filter config section ('encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                # prefer a registered in-process data filter over a shell
                # command when the command string starts with its name
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
854 854
855 855 def _filter(self, filterpats, filename, data):
856 856 for mf, fn, cmd in filterpats:
857 857 if mf(filename):
858 858 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
859 859 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
860 860 break
861 861
862 862 return data
863 863
    @propertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        """Register an in-process data filter usable from filter configs."""
        self._datafilters[name] = filter

    def wread(self, filename):
        """Read a working-directory file (symlink targets included),
        applying encode filters."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """Write data to the working directory, applying decode filters.

        flags: 'l' writes a symlink, 'x' sets the executable bit.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        # Apply decode filters without touching the filesystem.
        return self._filter(self._decodefilterpats, filename, data)
893 893
    def transaction(self, desc):
        """Open a new transaction described by *desc* and return it.

        If a transaction is already running, return a nested handle on it
        instead. Raises RepoError if an abandoned journal exists."""
        # self._transref is a weakref; dereference it if still alive.
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # Snapshot volatile state, then arrange for the journal.* files to
        # be renamed to undo.* when the transaction closes successfully.
        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
913 913
914 914 def _journalfiles(self):
915 915 return (self.sjoin('journal'), self.join('journal.dirstate'),
916 916 self.join('journal.branch'), self.join('journal.desc'),
917 917 self.join('journal.bookmarks'),
918 918 self.sjoin('journal.phaseroots'))
919 919
    def undofiles(self):
        # The undo.* counterparts of the journal files: created when a
        # transaction closes (see aftertrans renames in transaction())
        # and consumed by rollback().
        return [undoname(x) for x in self._journalfiles()]
922 922
    def _writejournal(self, desc):
        """Snapshot volatile repository state (dirstate, branch, bookmarks,
        phase roots) into journal.* files so an interrupted transaction can
        be rolled back."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # journal.desc records the pre-transaction changelog length and the
        # transaction description (shown by rollback).
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
934 934
    def recover(self):
        """Replay the journal to undo an interrupted transaction.

        Returns True if a journal was found and rolled back, False
        otherwise."""
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop cached state that may reference rolled-back data
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
949 949
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction, if undo files exist.

        Returns 0 on success (delegated to _rollback), 1 when there is
        nothing to roll back."""
        wlock = lock = None
        try:
            # both locks: rollback touches the store and the dirstate
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
962 962
    def _rollback(self, dryrun, force):
        """Restore the repository to the state saved in the undo.* files.

        With *dryrun*, only report what would happen. Without *force*,
        refuse to roll back a commit when the working directory is not at
        tip (the rollback could lose data). Returns 0. Caller must hold
        both wlock and lock (see rollback())."""
        ui = self.ui
        try:
            # undo.desc: "<old changelog length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        # If a dirstate parent no longer exists, restore the pre-transaction
        # dirstate and branch as well.
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1028 1031
1029 1032 def invalidatecaches(self):
1030 1033 def delcache(name):
1031 1034 try:
1032 1035 delattr(self, name)
1033 1036 except AttributeError:
1034 1037 pass
1035 1038
1036 1039 delcache('_tagscache')
1037 1040
1038 1041 self._branchcache = None # in UTF-8
1039 1042 self._branchcachetip = None
1040 1043
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            # Drop the dirstate's own cached filecache properties before
            # dropping the cached dirstate object itself.
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')
1057 1060
    def invalidate(self):
        """Drop cached filecache-managed properties (except the dirstate,
        which invalidatedirstate() handles) plus the derived caches."""
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
1072
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        On contention: raise immediately unless *wait* is set, in which
        case retry with the configured ui.timeout (default 600s).
        *releasefn* runs on release, *acquirefn* right after acquisition;
        *desc* is used in user-facing messages."""
        try:
            # first attempt: zero timeout, i.e. fail fast if held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
1087 1087
1088 1088 def _afterlock(self, callback):
1089 1089 """add a callback to the current repository lock.
1090 1090
1091 1091 The callback will be executed on lock release."""
1092 1092 l = self._lockref and self._lockref()
1093 1093 if l:
1094 1094 l.postrelease.append(callback)
1095 1095 else:
1096 1096 callback()
1097 1097
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse the live lock if one is already held
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # Flush store and phase data on release, then refresh the
            # filecache stat entries (dirstate excluded: the wlock's
            # unlock handles it) so our own writes aren't seen as stale.
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1120 1120
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse the live lock if one is already held
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # Write the dirstate back on release and refresh its filecache
            # stat entry so the write isn't seen as an external change.
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1141 1141
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: filectx of the file to commit
        manifest1/manifest2: the two parent manifests
        linkrev: changelog revision the new filelog entry will link to
        tr: the running transaction
        changelist: output parameter; the file name is appended to it
        when the file is recorded as changed

        Returns the filelog node for the file (a parent node is reused
        when the file content did not change).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1221 1221
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # track visited directories so explicit dir patterns can be
            # validated below; reject bad files outright
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and no branch change: no commit to make
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        # fire the commit hook only once the lock has been released
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1390 1390
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With *error*, IOErrors while committing files are fatal instead of
        silently dropping the file. Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # missing file: treat it as removed
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # put the new commit in the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1471 1471
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was uptodate before nodes were removed
        and you also know the set of candidate new heads that may have resulted
        from the destruction, you can set newheadnodes. This will enable the
        code to update the branchheads cache, rather than having future code
        decide it's invalid and regenerating it from scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
1508
1506 1509 def walk(self, match, node=None):
1507 1510 '''
1508 1511 walk recursively through the directory tree or a given
1509 1512 changeset, finding all files matched by the match
1510 1513 function
1511 1514 '''
1512 1515 return self[node].walk(match)
1513 1516
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of file lists: (modified, added, removed,
        deleted, unknown, ignored, clean), each sorted; the last three
        are only populated when the corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to the files selected by match
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1666 1669
1667 1670 def heads(self, start=None):
1668 1671 heads = self.changelog.heads(start)
1669 1672 # sort the output in rev descending order
1670 1673 return sorted(heads, key=self.changelog.rev, reverse=True)
1671 1674
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # drop branch-closing heads unless explicitly requested
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads
1694 1697
    def branches(self, nodes):
        """For each starting node (default: tip), walk first parents until
        a merge or root changeset is reached and collect the 4-tuple
        (start, stop, parent1-of-stop, parent2-of-stop)."""
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                # stop at a merge (two real parents) or a root (null p1)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1708 1711
    def between(self, pairs):
        """For each (top, bottom) pair, walk first parents from top toward
        bottom and return the nodes found at exponentially growing
        distances (1, 2, 4, ...) along the chain, one list per pair."""
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1   # next distance at which to sample a node

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
1727 1730
    def pull(self, remote, heads=None, force=False):
        """Pull changes from *remote* into this repository.

        *heads* restricts the pull to those heads; *force* pulls even from
        an unrelated repository. Also synchronizes phases and (when
        enabled) obsolescence markers. Returns addchangegroup()'s result,
        or 0 when nothing was fetched."""
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the richest changegroup retrieval the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                if tr is not None:
                    tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
1808 1811
1809 1812 def checkpush(self, force, revs):
1810 1813 """Extensions can override this function if additional checks have
1811 1814 to be performed before pushing, or call it if they override push
1812 1815 command.
1813 1816 """
1814 1817 pass
1815 1818
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()

        After the changegroup transfer, phase data, obsolescence markers
        and bookmarks are synchronized with the remote as well.
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if self.obsstore:
                            # these messages are here for 80 char limit reason
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If we are to push if there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missinghead will be obsolete or
                            # unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                # cheads: the heads whose phase data should be synchronized
                # with the remote after this (possibly partial) push
                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        # bookmark updates happen outside the locks: only bookmarks that
        # fast-forward on the remote (cl in cr.descendants()) are pushed
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1997 2000
1998 2001 def changegroupinfo(self, nodes, source):
1999 2002 if self.ui.verbose or source == 'bundle':
2000 2003 self.ui.status(_("%d changesets found\n") % len(nodes))
2001 2004 if self.ui.debugflag:
2002 2005 self.ui.debug("list of changesets:\n")
2003 2006 for node in nodes:
2004 2007 self.ui.debug("%s\n" % hex(node))
2005 2008
2006 2009 def changegroupsubset(self, bases, heads, source):
2007 2010 """Compute a changegroup consisting of all the nodes that are
2008 2011 descendants of any of the bases and ancestors of any of the heads.
2009 2012 Return a chunkbuffer object whose read() method will return
2010 2013 successive changegroup chunks.
2011 2014
2012 2015 It is fairly complex as determining which filenodes and which
2013 2016 manifest nodes need to be included for the changeset to be complete
2014 2017 is non-trivial.
2015 2018
2016 2019 Another wrinkle is doing the reverse, figuring out which changeset in
2017 2020 the changegroup a particular filenode or manifestnode belongs to.
2018 2021 """
2019 2022 cl = self.changelog
2020 2023 if not bases:
2021 2024 bases = [nullid]
2022 2025 csets, bases, heads = cl.nodesbetween(bases, heads)
2023 2026 # We assume that all ancestors of bases are known
2024 2027 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2025 2028 return self._changegroupsubset(common, csets, heads, source)
2026 2029
2027 2030 def getlocalbundle(self, source, outgoing):
2028 2031 """Like getbundle, but taking a discovery.outgoing as an argument.
2029 2032
2030 2033 This is only implemented for local repos and reuses potentially
2031 2034 precomputed sets in outgoing."""
2032 2035 if not outgoing.missing:
2033 2036 return None
2034 2037 return self._changegroupsubset(outgoing.common,
2035 2038 outgoing.missing,
2036 2039 outgoing.missingheads,
2037 2040 source)
2038 2041
2039 2042 def getbundle(self, source, heads=None, common=None):
2040 2043 """Like changegroupsubset, but returns the set difference between the
2041 2044 ancestors of heads and the ancestors common.
2042 2045
2043 2046 If heads is None, use the local heads. If common is None, use [nullid].
2044 2047
2045 2048 The nodes in common might not all be known locally due to the way the
2046 2049 current discovery protocol works.
2047 2050 """
2048 2051 cl = self.changelog
2049 2052 if common:
2050 2053 nm = cl.nodemap
2051 2054 common = [n for n in common if n in nm]
2052 2055 else:
2053 2056 common = [nullid]
2054 2057 if not heads:
2055 2058 heads = cl.heads()
2056 2059 return self.getlocalbundle(source,
2057 2060 discovery.outgoing(cl, common, heads))
2058 2061
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup for csets, assuming the receiver already
        has every revision whose linkrev is in commonrevs.

        Returns a chunkbuffer wrapping a generator of changegroup
        chunks.  The lookup callback passed to the bundler is what ties
        changelog, manifest and filelog entries back to their owning
        changeset node, and as a side effect records which manifests
        (mfs) and file nodes (fnodes) still need to be bundled.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        # fstate/count are single-element-mutable state shared with the
        # lookup closure: current filename+its node map, and progress
        # [position, total]
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            # dispatch on which revlog is being bundled: changelog,
            # manifest, or a filelog (the else branch)
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2165 2168
2166 2169 def changegroup(self, basenodes, source):
2167 2170 # to avoid a race we use changegroupsubset() (issue1320)
2168 2171 return self.changegroupsubset(basenodes, self.heads(), source)
2169 2172
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        # fstate/count are mutable state shared with the lookup closure:
        # current filename and progress [position, total]
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        # every revision of a revlog whose linkrev is in revset is sent
        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            # dispatch on which revlog is being bundled: changelog,
            # manifest, or a filelog (the else branch); for the latter
            # two the owning changeset is recovered via the linkrev
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2262 2265
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                # progress callback handed to the unbundler; mutated in
                # place (step/count/total) as we move between stages
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # any file node still listed in needfiles was promised by a
            # manifest but never delivered
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # dh: net change in head count (branch-closing heads excluded)
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2453 2456
    def stream_in(self, remote, requirements):
        """Clone by copying raw store files streamed from remote.

        Protocol: a status line (0 ok, 1 forbidden, 2 remote lock
        failed), then a "<filecount> <bytecount>" line, then per file a
        "<name>\\0<size>" header followed by size bytes of data.
        Raises error.ResponseError on malformed responses.  Returns
        len(heads) + 1, matching addchangegroup's convention.
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                # avoid division by zero in the transfer-rate report
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2521 2524
2522 2525 def clone(self, remote, heads=[], stream=False):
2523 2526 '''clone remote repository.
2524 2527
2525 2528 keyword arguments:
2526 2529 heads: list of revs to clone (forces use of pull)
2527 2530 stream: use streaming clone if possible'''
2528 2531
2529 2532 # now, all clients that can request uncompressed clones can
2530 2533 # read repo formats supported by all servers that can serve
2531 2534 # them.
2532 2535
2533 2536 # if revlog format changes, client will have to check version
2534 2537 # and format flags on "stream" capability, and use
2535 2538 # uncompressed only if compatible.
2536 2539
2537 2540 if not stream:
2538 2541 # if the server explicitely prefer to stream (for fast LANs)
2539 2542 stream = remote.capable('stream-preferred')
2540 2543
2541 2544 if stream and not heads:
2542 2545 # 'stream' means remote revlog format is revlogv1 only
2543 2546 if remote.capable('stream'):
2544 2547 return self.stream_in(remote, set(('revlogv1',)))
2545 2548 # otherwise, 'streamreqs' contains the remote revlog format
2546 2549 streamreqs = remote.capable('streamreqs')
2547 2550 if streamreqs:
2548 2551 streamreqs = set(streamreqs.split(','))
2549 2552 # if we support it, stream in and adjust our requirements
2550 2553 if not streamreqs - self.supportedformats:
2551 2554 return self.stream_in(remote, streamreqs)
2552 2555 return self.pull(remote, heads)
2553 2556
2554 2557 def pushkey(self, namespace, key, old, new):
2555 2558 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2556 2559 old=old, new=new)
2557 2560 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2558 2561 ret = pushkey.push(self, namespace, key, old, new)
2559 2562 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2560 2563 ret=ret)
2561 2564 return ret
2562 2565
2563 2566 def listkeys(self, namespace):
2564 2567 self.hook('prelistkeys', throw=True, namespace=namespace)
2565 2568 self.ui.debug('listing keys for "%s"\n' % namespace)
2566 2569 values = pushkey.list(self, namespace)
2567 2570 self.hook('listkeys', namespace=namespace, values=values)
2568 2571 return values
2569 2572
2570 2573 def debugwireargs(self, one, two, three=None, four=None, five=None):
2571 2574 '''used to test argument passing over the wire'''
2572 2575 return "%s %s %s %s %s" % (one, two, three, four, five)
2573 2576
2574 2577 def savecommitmessage(self, text):
2575 2578 fp = self.opener('last-message.txt', 'wb')
2576 2579 try:
2577 2580 fp.write(text)
2578 2581 finally:
2579 2582 fp.close()
2580 2583 return self.pathto(fp.name[len(self.root)+1:])
2581 2584
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback renaming each (src, dest) pair in *files*.

    Pairs whose source file does not exist yet are silently skipped.
    The pairs are copied eagerly so the callback holds no reference to
    the caller's sequence.
    """
    pending = [tuple(t) for t in files]
    def renamer():
        for src, dest in pending:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renamer
2592 2595
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
2597 2600
def instance(ui, path, create):
    """Repository factory: open (or create) a local repository at path."""
    localpath = util.urllocalpath(path)
    return localrepository(ui, localpath, create)
2600 2603
def islocal(path):
    """A local repository is always local, whatever *path* looks like."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now