localrepo: use "vfs" constructor/field for initialization around "store"
FUJIWARA Katsunori
r17654:1dc37491 default
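In short, localrepository now passes the "vfs" class (scmutil.vfs, the newer name for scmutil.opener) to the store constructor and reads the store's "vfs" field as the canonical handle, keeping the "sopener"/"opener" spellings only as compatibility aliases. A minimal sketch of the initialization after this change, using only names that appear in the diff below:

    self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
    self.spath = self.store.path
    self.svfs = self.store.vfs                   # canonical store vfs
    self.sopener = self.svfs                     # legacy alias
    self.sjoin = self.store.join
    self.vfs.createmode = self.store.createmode  # was: self.opener.createmode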
@@ -1,2610 +1,2610 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
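# A minimal sketch (illustrative, not part of this change): storecache
# resolves cached filenames through sjoin() instead of join(), so a
# decorated property tracks a file under .hg/store rather than .hg:
#
#   storecache('00changelog.i').join(repo, '00changelog.i')
#   # -> repo.sjoin('00changelog.i'), i.e. '<root>/.hg/store/00changelog.i'
#   filecache('dirstate').join(repo, 'dirstate')
#   # -> '<root>/.hg/dirstate'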
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 49 def canpush(self):
50 50 return True
51 51
52 52 def url(self):
53 53 return self._repo.url()
54 54
55 55 def lookup(self, key):
56 56 return self._repo.lookup(key)
57 57
58 58 def branchmap(self):
59 59 return discovery.visiblebranchmap(self._repo)
60 60
61 61 def heads(self):
62 62 return discovery.visibleheads(self._repo)
63 63
64 64 def known(self, nodes):
65 65 return self._repo.known(nodes)
66 66
67 67 def getbundle(self, source, heads=None, common=None):
68 68 return self._repo.getbundle(source, heads=heads, common=common)
69 69
70 70 # TODO We might want to move the next two calls into legacypeer and add
71 71 # unbundle instead.
72 72
73 73 def lock(self):
74 74 return self._repo.lock()
75 75
76 76 def addchangegroup(self, cg, source, url):
77 77 return self._repo.addchangegroup(cg, source, url)
78 78
79 79 def pushkey(self, namespace, key, old, new):
80 80 return self._repo.pushkey(namespace, key, old, new)
81 81
82 82 def listkeys(self, namespace):
83 83 return self._repo.listkeys(namespace)
84 84
85 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 86 '''used to test argument passing over the wire'''
87 87 return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
89 89 class locallegacypeer(localpeer):
90 90 '''peer extension which implements legacy methods too; used for tests with
91 91 restricted capabilities'''
92 92
93 93 def __init__(self, repo):
94 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95 95
96 96 def branches(self, nodes):
97 97 return self._repo.branches(nodes)
98 98
99 99 def between(self, pairs):
100 100 return self._repo.between(pairs)
101 101
102 102 def changegroup(self, basenodes, source):
103 103 return self._repo.changegroup(basenodes, source)
104 104
105 105 def changegroupsubset(self, bases, heads, source):
106 106 return self._repo.changegroupsubset(bases, heads, source)
107 107
108 108 class localrepository(object):
109 109
110 110 supportedformats = set(('revlogv1', 'generaldelta'))
111 111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 112 'dotencode'))
113 113 openerreqs = set(('revlogv1', 'generaldelta'))
114 114 requirements = ['revlogv1']
115 115
116 116 def _baserequirements(self, create):
117 117 return self.requirements[:]
118 118
119 119 def __init__(self, baseui, path=None, create=False):
120 120 self.wvfs = scmutil.vfs(path, expand=True)
121 121 self.wopener = self.wvfs
122 122 self.root = self.wvfs.base
123 123 self.path = self.wvfs.join(".hg")
124 124 self.origroot = path
125 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 126 self.vfs = scmutil.vfs(self.path)
127 127 self.opener = self.vfs
128 128 self.baseui = baseui
129 129 self.ui = baseui.copy()
130 130 # A list of callbacks to shape the phase if no data were found.
131 131 # Callbacks are in the form: func(repo, roots) --> processed root.
132 132 # This list is to be filled by extensions during repo setup.
133 133 self._phasedefaults = []
134 134 try:
135 135 self.ui.readconfig(self.join("hgrc"), self.root)
136 136 extensions.loadall(self.ui)
137 137 except IOError:
138 138 pass
139 139
140 140 if not self.vfs.isdir():
141 141 if create:
142 142 if not self.wvfs.exists():
143 143 self.wvfs.makedirs()
144 144 self.vfs.makedir(notindexed=True)
145 145 requirements = self._baserequirements(create)
146 146 if self.ui.configbool('format', 'usestore', True):
147 147 self.vfs.mkdir("store")
148 148 requirements.append("store")
149 149 if self.ui.configbool('format', 'usefncache', True):
150 150 requirements.append("fncache")
151 151 if self.ui.configbool('format', 'dotencode', True):
152 152 requirements.append('dotencode')
153 153 # create an invalid changelog
154 154 self.vfs.append(
155 155 "00changelog.i",
156 156 '\0\0\0\2' # represents revlogv2
157 157 ' dummy changelog to prevent using the old repo layout'
158 158 )
159 159 if self.ui.configbool('format', 'generaldelta', False):
160 160 requirements.append("generaldelta")
161 161 requirements = set(requirements)
162 162 else:
163 163 raise error.RepoError(_("repository %s not found") % path)
164 164 elif create:
165 165 raise error.RepoError(_("repository %s already exists") % path)
166 166 else:
167 167 try:
168 168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 169 except IOError, inst:
170 170 if inst.errno != errno.ENOENT:
171 171 raise
172 172 requirements = set()
173 173
174 174 self.sharedpath = self.path
175 175 try:
176 176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 177 if not os.path.exists(s):
178 178 raise error.RepoError(
179 179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 180 self.sharedpath = s
181 181 except IOError, inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184
185 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
185 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
186 186 self.spath = self.store.path
187 self.sopener = self.store.opener
188 self.svfs = self.sopener
187 self.svfs = self.store.vfs
188 self.sopener = self.svfs
189 189 self.sjoin = self.store.join
190 self.opener.createmode = self.store.createmode
190 self.vfs.createmode = self.store.createmode
191 191 self._applyrequirements(requirements)
192 192 if create:
193 193 self._writerequirements()
194 194
195 195
196 196 self._branchcache = None
197 197 self._branchcachetip = None
198 198 self.filterpats = {}
199 199 self._datafilters = {}
200 200 self._transref = self._lockref = self._wlockref = None
201 201
202 202 # A cache for various files under .hg/ that tracks file changes,
203 203 # (used by the filecache decorator)
204 204 #
205 205 # Maps a property name to its util.filecacheentry
206 206 self._filecache = {}
207 207
208 208 def close(self):
209 209 pass
210 210
211 211 def _restrictcapabilities(self, caps):
212 212 return caps
213 213
214 214 def _applyrequirements(self, requirements):
215 215 self.requirements = requirements
216 216 self.sopener.options = dict((r, 1) for r in requirements
217 217 if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
224 224
225 225 def _checknested(self, path):
226 226 """Determine if path is a legal nested repository."""
227 227 if not path.startswith(self.root):
228 228 return False
229 229 subpath = path[len(self.root) + 1:]
230 230 normsubpath = util.pconvert(subpath)
231 231
232 232 # XXX: Checking against the current working copy is wrong in
233 233 # the sense that it can reject things like
234 234 #
235 235 # $ hg cat -r 10 sub/x.txt
236 236 #
237 237 # if sub/ is no longer a subrepository in the working copy
238 238 # parent revision.
239 239 #
240 240 # However, it can of course also allow things that would have
241 241 # been rejected before, such as the above cat command if sub/
242 242 # is a subrepository now, but was a normal directory before.
243 243 # The old path auditor would have rejected by mistake since it
244 244 # panics when it sees sub/.hg/.
245 245 #
246 246 # All in all, checking against the working copy seems sensible
247 247 # since we want to prevent access to nested repositories on
248 248 # the filesystem *now*.
249 249 ctx = self[None]
250 250 parts = util.splitpath(subpath)
251 251 while parts:
252 252 prefix = '/'.join(parts)
253 253 if prefix in ctx.substate:
254 254 if prefix == normsubpath:
255 255 return True
256 256 else:
257 257 sub = ctx.sub(prefix)
258 258 return sub.checknested(subpath[len(prefix) + 1:])
259 259 else:
260 260 parts.pop()
261 261 return False
262 262
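# Worked example (a sketch of the rules above): with a working copy
# whose ctx.substate contains 'sub',
#
#   _checknested(self.root + '/sub')        -> True
#   _checknested(self.root + '/sub/deeper') -> sub.checknested('deeper')
#   _checknested(self.root + '/plain')      -> False (parts exhausted)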
263 263 def peer(self):
264 264 return localpeer(self) # not cached to avoid reference cycle
265 265
266 266 @filecache('bookmarks')
267 267 def _bookmarks(self):
268 268 return bookmarks.read(self)
269 269
270 270 @filecache('bookmarks.current')
271 271 def _bookmarkcurrent(self):
272 272 return bookmarks.readcurrent(self)
273 273
274 274 def _writebookmarks(self, marks):
275 275 bookmarks.write(self)
276 276
277 277 def bookmarkheads(self, bookmark):
278 278 name = bookmark.split('@', 1)[0]
279 279 heads = []
280 280 for mark, n in self._bookmarks.iteritems():
281 281 if mark.split('@', 1)[0] == name:
282 282 heads.append(n)
283 283 return heads
284 284
285 285 @storecache('phaseroots')
286 286 def _phasecache(self):
287 287 return phases.phasecache(self, self._phasedefaults)
288 288
289 289 @storecache('obsstore')
290 290 def obsstore(self):
291 291 store = obsolete.obsstore(self.sopener)
292 292 if store and not obsolete._enabled:
293 293 # message is rare enough to not be translated
294 294 msg = 'obsolete feature not enabled but %i markers found!\n'
295 295 self.ui.warn(msg % len(list(store)))
296 296 return store
297 297
298 298 @propertycache
299 299 def hiddenrevs(self):
300 300 """hiddenrevs: revs that should be hidden by command and tools
301 301
302 302 This set is carried on the repo to ease initialization and lazy
303 303 loading; it'll probably move back to changelog for efficiency and
304 304 consistency reasons.
305 305
306 306 Note that the hiddenrevs will need invalidation when
307 307 - a new changeset is added (possibly unstable above extinct)
308 308 - a new obsolete marker is added (possibly a new extinct changeset)
309 309
310 310 hidden changesets cannot have non-hidden descendants
311 311 """
312 312 hidden = set()
313 313 if self.obsstore:
314 314 ### hide extinct changesets that are not accessible by any means
315 315 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
316 316 hidden.update(self.revs(hiddenquery))
317 317 return hidden
318 318
319 319 @storecache('00changelog.i')
320 320 def changelog(self):
321 321 c = changelog.changelog(self.sopener)
322 322 if 'HG_PENDING' in os.environ:
323 323 p = os.environ['HG_PENDING']
324 324 if p.startswith(self.root):
325 325 c.readpending('00changelog.i.a')
326 326 return c
327 327
328 328 @storecache('00manifest.i')
329 329 def manifest(self):
330 330 return manifest.manifest(self.sopener)
331 331
332 332 @filecache('dirstate')
333 333 def dirstate(self):
334 334 warned = [0]
335 335 def validate(node):
336 336 try:
337 337 self.changelog.rev(node)
338 338 return node
339 339 except error.LookupError:
340 340 if not warned[0]:
341 341 warned[0] = True
342 342 self.ui.warn(_("warning: ignoring unknown"
343 343 " working parent %s!\n") % short(node))
344 344 return nullid
345 345
346 346 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
347 347
348 348 def __getitem__(self, changeid):
349 349 if changeid is None:
350 350 return context.workingctx(self)
351 351 return context.changectx(self, changeid)
352 352
353 353 def __contains__(self, changeid):
354 354 try:
355 355 return bool(self.lookup(changeid))
356 356 except error.RepoLookupError:
357 357 return False
358 358
359 359 def __nonzero__(self):
360 360 return True
361 361
362 362 def __len__(self):
363 363 return len(self.changelog)
364 364
365 365 def __iter__(self):
366 366 for i in xrange(len(self)):
367 367 yield i
368 368
369 369 def revs(self, expr, *args):
370 370 '''Return a list of revisions matching the given revset'''
371 371 expr = revset.formatspec(expr, *args)
372 372 m = revset.match(None, expr)
373 373 return [r for r in m(self, range(len(self)))]
374 374
375 375 def set(self, expr, *args):
376 376 '''
377 377 Yield a context for each matching revision, after doing arg
378 378 replacement via revset.formatspec
379 379 '''
380 380 for r in self.revs(expr, *args):
381 381 yield self[r]
382 382
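# Illustrative usage (a sketch; see revset.formatspec for the supported
# %-escapes such as %d, %s and %ld):
#
#   repo.revs('ancestors(%d)', 42)            # -> list of revision numbers
#   for ctx in repo.set('branch(%s)', 'default'):
#       ...                                   # -> changectx objects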
383 383 def url(self):
384 384 return 'file:' + self.root
385 385
386 386 def hook(self, name, throw=False, **args):
387 387 return hook.hook(self.ui, self, name, throw, **args)
388 388
389 389 tag_disallowed = ':\r\n'
390 390
391 391 def _tag(self, names, node, message, local, user, date, extra={}):
392 392 if isinstance(names, str):
393 393 allchars = names
394 394 names = (names,)
395 395 else:
396 396 allchars = ''.join(names)
397 397 for c in self.tag_disallowed:
398 398 if c in allchars:
399 399 raise util.Abort(_('%r cannot be used in a tag name') % c)
400 400
401 401 branches = self.branchmap()
402 402 for name in names:
403 403 self.hook('pretag', throw=True, node=hex(node), tag=name,
404 404 local=local)
405 405 if name in branches:
406 406 self.ui.warn(_("warning: tag %s conflicts with existing"
407 407 " branch name\n") % name)
408 408
409 409 def writetags(fp, names, munge, prevtags):
410 410 fp.seek(0, 2)
411 411 if prevtags and prevtags[-1] != '\n':
412 412 fp.write('\n')
413 413 for name in names:
414 414 m = munge and munge(name) or name
415 415 if (self._tagscache.tagtypes and
416 416 name in self._tagscache.tagtypes):
417 417 old = self.tags().get(name, nullid)
418 418 fp.write('%s %s\n' % (hex(old), m))
419 419 fp.write('%s %s\n' % (hex(node), m))
420 420 fp.close()
421 421
422 422 prevtags = ''
423 423 if local:
424 424 try:
425 425 fp = self.opener('localtags', 'r+')
426 426 except IOError:
427 427 fp = self.opener('localtags', 'a')
428 428 else:
429 429 prevtags = fp.read()
430 430
431 431 # local tags are stored in the current charset
432 432 writetags(fp, names, None, prevtags)
433 433 for name in names:
434 434 self.hook('tag', node=hex(node), tag=name, local=local)
435 435 return
436 436
437 437 try:
438 438 fp = self.wfile('.hgtags', 'rb+')
439 439 except IOError, e:
440 440 if e.errno != errno.ENOENT:
441 441 raise
442 442 fp = self.wfile('.hgtags', 'ab')
443 443 else:
444 444 prevtags = fp.read()
445 445
446 446 # committed tags are stored in UTF-8
447 447 writetags(fp, names, encoding.fromlocal, prevtags)
448 448
449 449 fp.close()
450 450
451 451 self.invalidatecaches()
452 452
453 453 if '.hgtags' not in self.dirstate:
454 454 self[None].add(['.hgtags'])
455 455
456 456 m = matchmod.exact(self.root, '', ['.hgtags'])
457 457 tagnode = self.commit(message, user, date, extra=extra, match=m)
458 458
459 459 for name in names:
460 460 self.hook('tag', node=hex(node), tag=name, local=local)
461 461
462 462 return tagnode
463 463
464 464 def tag(self, names, node, message, local, user, date):
465 465 '''tag a revision with one or more symbolic names.
466 466
467 467 names is a list of strings or, when adding a single tag, names may be a
468 468 string.
469 469
470 470 if local is True, the tags are stored in a per-repository file.
471 471 otherwise, they are stored in the .hgtags file, and a new
472 472 changeset is committed with the change.
473 473
474 474 keyword arguments:
475 475
476 476 local: whether to store tags in non-version-controlled file
477 477 (default False)
478 478
479 479 message: commit message to use if committing
480 480
481 481 user: name of user to use if committing
482 482
483 483 date: date tuple to use if committing'''
484 484
485 485 if not local:
486 486 for x in self.status()[:5]:
487 487 if '.hgtags' in x:
488 488 raise util.Abort(_('working copy of .hgtags is changed '
489 489 '(please commit .hgtags manually)'))
490 490
491 491 self.tags() # instantiate the cache
492 492 self._tag(names, node, message, local, user, date)
493 493
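# Illustrative call (a sketch): tag the working directory parent with a
# local tag; with local=True nothing is committed, the tag is written to
# .hg/localtags instead of .hgtags:
#
#   repo.tag(['snapshot'], repo['.'].node(), 'msg', True, None, None)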
494 494 @propertycache
495 495 def _tagscache(self):
496 496 '''Returns a tagscache object that contains various tags related
497 497 caches.'''
498 498
499 499 # This simplifies its cache management by having one decorated
500 500 # function (this one) and the rest simply fetch things from it.
501 501 class tagscache(object):
502 502 def __init__(self):
503 503 # These two define the set of tags for this repository. tags
504 504 # maps tag name to node; tagtypes maps tag name to 'global' or
505 505 # 'local'. (Global tags are defined by .hgtags across all
506 506 # heads, and local tags are defined in .hg/localtags.)
507 507 # They constitute the in-memory cache of tags.
508 508 self.tags = self.tagtypes = None
509 509
510 510 self.nodetagscache = self.tagslist = None
511 511
512 512 cache = tagscache()
513 513 cache.tags, cache.tagtypes = self._findtags()
514 514
515 515 return cache
516 516
517 517 def tags(self):
518 518 '''return a mapping of tag to node'''
519 519 t = {}
520 520 for k, v in self._tagscache.tags.iteritems():
521 521 try:
522 522 # ignore tags to unknown nodes
523 523 self.changelog.rev(v)
524 524 t[k] = v
525 525 except (error.LookupError, ValueError):
526 526 pass
527 527 return t
528 528
529 529 def _findtags(self):
530 530 '''Do the hard work of finding tags. Return a pair of dicts
531 531 (tags, tagtypes) where tags maps tag name to node, and tagtypes
532 532 maps tag name to a string like \'global\' or \'local\'.
533 533 Subclasses or extensions are free to add their own tags, but
534 534 should be aware that the returned dicts will be retained for the
535 535 duration of the localrepo object.'''
536 536
537 537 # XXX what tagtype should subclasses/extensions use? Currently
538 538 # mq and bookmarks add tags, but do not set the tagtype at all.
539 539 # Should each extension invent its own tag type? Should there
540 540 # be one tagtype for all such "virtual" tags? Or is the status
541 541 # quo fine?
542 542
543 543 alltags = {} # map tag name to (node, hist)
544 544 tagtypes = {}
545 545
546 546 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
547 547 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
548 548
549 549 # Build the return dicts. Have to re-encode tag names because
550 550 # the tags module always uses UTF-8 (in order not to lose info
551 551 # writing to the cache), but the rest of Mercurial wants them in
552 552 # local encoding.
553 553 tags = {}
554 554 for (name, (node, hist)) in alltags.iteritems():
555 555 if node != nullid:
556 556 tags[encoding.tolocal(name)] = node
557 557 tags['tip'] = self.changelog.tip()
558 558 tagtypes = dict([(encoding.tolocal(name), value)
559 559 for (name, value) in tagtypes.iteritems()])
560 560 return (tags, tagtypes)
561 561
562 562 def tagtype(self, tagname):
563 563 '''
564 564 return the type of the given tag. result can be:
565 565
566 566 'local' : a local tag
567 567 'global' : a global tag
568 568 None : tag does not exist
569 569 '''
570 570
571 571 return self._tagscache.tagtypes.get(tagname)
572 572
573 573 def tagslist(self):
574 574 '''return a list of tags ordered by revision'''
575 575 if not self._tagscache.tagslist:
576 576 l = []
577 577 for t, n in self.tags().iteritems():
578 578 r = self.changelog.rev(n)
579 579 l.append((r, t, n))
580 580 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
581 581
582 582 return self._tagscache.tagslist
583 583
584 584 def nodetags(self, node):
585 585 '''return the tags associated with a node'''
586 586 if not self._tagscache.nodetagscache:
587 587 nodetagscache = {}
588 588 for t, n in self._tagscache.tags.iteritems():
589 589 nodetagscache.setdefault(n, []).append(t)
590 590 for tags in nodetagscache.itervalues():
591 591 tags.sort()
592 592 self._tagscache.nodetagscache = nodetagscache
593 593 return self._tagscache.nodetagscache.get(node, [])
594 594
595 595 def nodebookmarks(self, node):
596 596 marks = []
597 597 for bookmark, n in self._bookmarks.iteritems():
598 598 if n == node:
599 599 marks.append(bookmark)
600 600 return sorted(marks)
601 601
602 602 def _branchtags(self, partial, lrev):
603 603 # TODO: rename this function?
604 604 tiprev = len(self) - 1
605 605 if lrev != tiprev:
606 606 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
607 607 self._updatebranchcache(partial, ctxgen)
608 608 self._writebranchcache(partial, self.changelog.tip(), tiprev)
609 609
610 610 return partial
611 611
612 612 def updatebranchcache(self):
613 613 tip = self.changelog.tip()
614 614 if self._branchcache is not None and self._branchcachetip == tip:
615 615 return
616 616
617 617 oldtip = self._branchcachetip
618 618 self._branchcachetip = tip
619 619 if oldtip is None or oldtip not in self.changelog.nodemap:
620 620 partial, last, lrev = self._readbranchcache()
621 621 else:
622 622 lrev = self.changelog.rev(oldtip)
623 623 partial = self._branchcache
624 624
625 625 self._branchtags(partial, lrev)
626 626 # this private cache holds all heads (not just the branch tips)
627 627 self._branchcache = partial
628 628
629 629 def branchmap(self):
630 630 '''returns a dictionary {branch: [branchheads]}'''
631 631 self.updatebranchcache()
632 632 return self._branchcache
633 633
634 634 def _branchtip(self, heads):
635 635 '''return the tipmost branch head in heads'''
636 636 tip = heads[-1]
637 637 for h in reversed(heads):
638 638 if not self[h].closesbranch():
639 639 tip = h
640 640 break
641 641 return tip
642 642
643 643 def branchtip(self, branch):
644 644 '''return the tip node for a given branch'''
645 645 if branch not in self.branchmap():
646 646 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
647 647 return self._branchtip(self.branchmap()[branch])
648 648
649 649 def branchtags(self):
650 650 '''return a dict where branch names map to the tipmost head of
651 651 the branch, open heads come before closed'''
652 652 bt = {}
653 653 for bn, heads in self.branchmap().iteritems():
654 654 bt[bn] = self._branchtip(heads)
655 655 return bt
656 656
657 657 def _readbranchcache(self):
658 658 partial = {}
659 659 try:
660 660 f = self.opener("cache/branchheads")
661 661 lines = f.read().split('\n')
662 662 f.close()
663 663 except (IOError, OSError):
664 664 return {}, nullid, nullrev
665 665
666 666 try:
667 667 last, lrev = lines.pop(0).split(" ", 1)
668 668 last, lrev = bin(last), int(lrev)
669 669 if lrev >= len(self) or self[lrev].node() != last:
670 670 # invalidate the cache
671 671 raise ValueError('invalidating branch cache (tip differs)')
672 672 for l in lines:
673 673 if not l:
674 674 continue
675 675 node, label = l.split(" ", 1)
676 676 label = encoding.tolocal(label.strip())
677 677 if not node in self:
678 678 raise ValueError('invalidating branch cache because node '+
679 679 '%s does not exist' % node)
680 680 partial.setdefault(label, []).append(bin(node))
681 681 except KeyboardInterrupt:
682 682 raise
683 683 except Exception, inst:
684 684 if self.ui.debugflag:
685 685 self.ui.warn(str(inst), '\n')
686 686 partial, last, lrev = {}, nullid, nullrev
687 687 return partial, last, lrev
688 688
689 689 def _writebranchcache(self, branches, tip, tiprev):
690 690 try:
691 691 f = self.opener("cache/branchheads", "w", atomictemp=True)
692 692 f.write("%s %s\n" % (hex(tip), tiprev))
693 693 for label, nodes in branches.iteritems():
694 694 for node in nodes:
695 695 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
696 696 f.close()
697 697 except (IOError, OSError):
698 698 pass
699 699
700 700 def _updatebranchcache(self, partial, ctxgen):
701 701 """Given a branchhead cache, partial, that may have extra nodes or be
702 702 missing heads, and a generator of nodes that are at least a superset of
703 703 heads missing, this function updates partial to be correct.
704 704 """
705 705 # collect new branch entries
706 706 newbranches = {}
707 707 for c in ctxgen:
708 708 newbranches.setdefault(c.branch(), []).append(c.node())
709 709 # if older branchheads are reachable from new ones, they aren't
710 710 # really branchheads. Note checking parents is insufficient:
711 711 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
712 712 for branch, newnodes in newbranches.iteritems():
713 713 bheads = partial.setdefault(branch, [])
714 714 # Remove candidate heads that no longer are in the repo (e.g., as
715 715 # the result of a strip that just happened). Avoid using 'node in
716 716 # self' here because that dives down into branchcache code somewhat
717 717 # recursively.
718 718 bheadrevs = [self.changelog.rev(node) for node in bheads
719 719 if self.changelog.hasnode(node)]
720 720 newheadrevs = [self.changelog.rev(node) for node in newnodes
721 721 if self.changelog.hasnode(node)]
722 722 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
723 723 # Remove duplicates - nodes that are in newheadrevs and are already
724 724 # in bheadrevs. This can happen if you strip a node whose parent
725 725 # was already a head (because they're on different branches).
726 726 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
727 727
728 728 # Starting from tip means fewer passes over reachable. If we know
729 729 # the new candidates are not ancestors of existing heads, we don't
730 730 # have to examine ancestors of existing heads
731 731 if ctxisnew:
732 732 iterrevs = sorted(newheadrevs)
733 733 else:
734 734 iterrevs = list(bheadrevs)
735 735
736 736 # This loop prunes out two kinds of heads - heads that are
737 737 # superseded by a head in newheadrevs, and newheadrevs that are not
738 738 # heads because an existing head is their descendant.
739 739 while iterrevs:
740 740 latest = iterrevs.pop()
741 741 if latest not in bheadrevs:
742 742 continue
743 743 ancestors = set(self.changelog.ancestors([latest],
744 744 bheadrevs[0]))
745 745 if ancestors:
746 746 bheadrevs = [b for b in bheadrevs if b not in ancestors]
747 747 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
748 748
749 749 # There may be branches that cease to exist when the last commit in the
750 750 # branch was stripped. This code filters them out. Note that the
751 751 # branch that ceased to exist may not be in newbranches because
752 752 # newbranches is the set of candidate heads, which when you strip the
753 753 # last commit in a branch will be the parent branch.
754 754 for branch in partial.keys():
755 755 nodes = [head for head in partial[branch]
756 756 if self.changelog.hasnode(head)]
757 757 if not nodes:
758 758 del partial[branch]
759 759
760 760 def lookup(self, key):
761 761 return self[key].node()
762 762
763 763 def lookupbranch(self, key, remote=None):
764 764 repo = remote or self
765 765 if key in repo.branchmap():
766 766 return key
767 767
768 768 repo = (remote and remote.local()) and remote or self
769 769 return repo[key].branch()
770 770
771 771 def known(self, nodes):
772 772 nm = self.changelog.nodemap
773 773 pc = self._phasecache
774 774 result = []
775 775 for n in nodes:
776 776 r = nm.get(n)
777 777 resp = not (r is None or pc.phase(self, r) >= phases.secret)
778 778 result.append(resp)
779 779 return result
780 780
781 781 def local(self):
782 782 return self
783 783
784 784 def cancopy(self):
785 785 return self.local() # so statichttprepo's override of local() works
786 786
787 787 def join(self, f):
788 788 return os.path.join(self.path, f)
789 789
790 790 def wjoin(self, f):
791 791 return os.path.join(self.root, f)
792 792
793 793 def file(self, f):
794 794 if f[0] == '/':
795 795 f = f[1:]
796 796 return filelog.filelog(self.sopener, f)
797 797
798 798 def changectx(self, changeid):
799 799 return self[changeid]
800 800
801 801 def parents(self, changeid=None):
802 802 '''get list of changectxs for parents of changeid'''
803 803 return self[changeid].parents()
804 804
805 805 def setparents(self, p1, p2=nullid):
806 806 copies = self.dirstate.setparents(p1, p2)
807 807 if copies:
808 808 # Adjust copy records, the dirstate cannot do it, it
809 809 # requires access to parents manifests. Preserve them
810 810 # only for entries added to first parent.
811 811 pctx = self[p1]
812 812 for f in copies:
813 813 if f not in pctx and copies[f] in pctx:
814 814 self.dirstate.copy(copies[f], f)
815 815
816 816 def filectx(self, path, changeid=None, fileid=None):
817 817 """changeid can be a changeset revision, node, or tag.
818 818 fileid can be a file revision or node."""
819 819 return context.filectx(self, path, changeid, fileid)
820 820
821 821 def getcwd(self):
822 822 return self.dirstate.getcwd()
823 823
824 824 def pathto(self, f, cwd=None):
825 825 return self.dirstate.pathto(f, cwd)
826 826
827 827 def wfile(self, f, mode='r'):
828 828 return self.wopener(f, mode)
829 829
830 830 def _link(self, f):
831 831 return os.path.islink(self.wjoin(f))
832 832
833 833 def _loadfilter(self, filter):
834 834 if filter not in self.filterpats:
835 835 l = []
836 836 for pat, cmd in self.ui.configitems(filter):
837 837 if cmd == '!':
838 838 continue
839 839 mf = matchmod.match(self.root, '', [pat])
840 840 fn = None
841 841 params = cmd
842 842 for name, filterfn in self._datafilters.iteritems():
843 843 if cmd.startswith(name):
844 844 fn = filterfn
845 845 params = cmd[len(name):].lstrip()
846 846 break
847 847 if not fn:
848 848 fn = lambda s, c, **kwargs: util.filter(s, c)
849 849 # Wrap old filters not supporting keyword arguments
850 850 if not inspect.getargspec(fn)[2]:
851 851 oldfn = fn
852 852 fn = lambda s, c, **kwargs: oldfn(s, c)
853 853 l.append((mf, fn, params))
854 854 self.filterpats[filter] = l
855 855 return self.filterpats[filter]
856 856
857 857 def _filter(self, filterpats, filename, data):
858 858 for mf, fn, cmd in filterpats:
859 859 if mf(filename):
860 860 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
861 861 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
862 862 break
863 863
864 864 return data
865 865
866 866 @propertycache
867 867 def _encodefilterpats(self):
868 868 return self._loadfilter('encode')
869 869
870 870 @propertycache
871 871 def _decodefilterpats(self):
872 872 return self._loadfilter('decode')
873 873
874 874 def adddatafilter(self, name, filter):
875 875 self._datafilters[name] = filter
876 876
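# Hypothetical registration (a sketch; 'upper'/'upperfilter' are made-up
# names): an extension can plug a named data filter into the
# [encode]/[decode] machinery above. _loadfilter matches the configured
# command against the registered name and passes the rest as params:
#
#   def upperfilter(s, params, ui=None, repo=None, filename=None):
#       return s.upper()
#   repo.adddatafilter('upper', upperfilter)
#   # hgrc:  [encode]
#   #        **.txt = upper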
877 877 def wread(self, filename):
878 878 if self._link(filename):
879 879 data = os.readlink(self.wjoin(filename))
880 880 else:
881 881 data = self.wopener.read(filename)
882 882 return self._filter(self._encodefilterpats, filename, data)
883 883
884 884 def wwrite(self, filename, data, flags):
885 885 data = self._filter(self._decodefilterpats, filename, data)
886 886 if 'l' in flags:
887 887 self.wopener.symlink(data, filename)
888 888 else:
889 889 self.wopener.write(filename, data)
890 890 if 'x' in flags:
891 891 util.setflags(self.wjoin(filename), False, True)
892 892
893 893 def wwritedata(self, filename, data):
894 894 return self._filter(self._decodefilterpats, filename, data)
895 895
896 896 def transaction(self, desc):
897 897 tr = self._transref and self._transref() or None
898 898 if tr and tr.running():
899 899 return tr.nest()
900 900
901 901 # abort here if the journal already exists
902 902 if os.path.exists(self.sjoin("journal")):
903 903 raise error.RepoError(
904 904 _("abandoned transaction found - run hg recover"))
905 905
906 906 self._writejournal(desc)
907 907 renames = [(x, undoname(x)) for x in self._journalfiles()]
908 908
909 909 tr = transaction.transaction(self.ui.warn, self.sopener,
910 910 self.sjoin("journal"),
911 911 aftertrans(renames),
912 912 self.store.createmode)
913 913 self._transref = weakref.ref(tr)
914 914 return tr
915 915
916 916 def _journalfiles(self):
917 917 return (self.sjoin('journal'), self.join('journal.dirstate'),
918 918 self.join('journal.branch'), self.join('journal.desc'),
919 919 self.join('journal.bookmarks'),
920 920 self.sjoin('journal.phaseroots'))
921 921
922 922 def undofiles(self):
923 923 return [undoname(x) for x in self._journalfiles()]
924 924
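# A sketch of the journal -> undo renaming assumed by _journalfiles()
# and undofiles(); it mirrors the module-level undoname() helper defined
# later in this file (outside this hunk):
#
#   def undoname(fn):
#       base, name = os.path.split(fn)
#       assert name.startswith('journal')
#       return os.path.join(base, name.replace('journal', 'undo', 1))
#
# e.g. '.hg/journal.dirstate' becomes '.hg/undo.dirstate' when the
# transaction completes (see aftertrans() used in transaction() above).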
925 925 def _writejournal(self, desc):
926 926 self.opener.write("journal.dirstate",
927 927 self.opener.tryread("dirstate"))
928 928 self.opener.write("journal.branch",
929 929 encoding.fromlocal(self.dirstate.branch()))
930 930 self.opener.write("journal.desc",
931 931 "%d\n%s\n" % (len(self), desc))
932 932 self.opener.write("journal.bookmarks",
933 933 self.opener.tryread("bookmarks"))
934 934 self.sopener.write("journal.phaseroots",
935 935 self.sopener.tryread("phaseroots"))
936 936
937 937 def recover(self):
938 938 lock = self.lock()
939 939 try:
940 940 if os.path.exists(self.sjoin("journal")):
941 941 self.ui.status(_("rolling back interrupted transaction\n"))
942 942 transaction.rollback(self.sopener, self.sjoin("journal"),
943 943 self.ui.warn)
944 944 self.invalidate()
945 945 return True
946 946 else:
947 947 self.ui.warn(_("no interrupted transaction available\n"))
948 948 return False
949 949 finally:
950 950 lock.release()
951 951
952 952 def rollback(self, dryrun=False, force=False):
953 953 wlock = lock = None
954 954 try:
955 955 wlock = self.wlock()
956 956 lock = self.lock()
957 957 if os.path.exists(self.sjoin("undo")):
958 958 return self._rollback(dryrun, force)
959 959 else:
960 960 self.ui.warn(_("no rollback information available\n"))
961 961 return 1
962 962 finally:
963 963 release(lock, wlock)
964 964
965 965 def _rollback(self, dryrun, force):
966 966 ui = self.ui
967 967 try:
968 968 args = self.opener.read('undo.desc').splitlines()
969 969 (oldlen, desc, detail) = (int(args[0]), args[1], None)
970 970 if len(args) >= 3:
971 971 detail = args[2]
972 972 oldtip = oldlen - 1
973 973
974 974 if detail and ui.verbose:
975 975 msg = (_('repository tip rolled back to revision %s'
976 976 ' (undo %s: %s)\n')
977 977 % (oldtip, desc, detail))
978 978 else:
979 979 msg = (_('repository tip rolled back to revision %s'
980 980 ' (undo %s)\n')
981 981 % (oldtip, desc))
982 982 except IOError:
983 983 msg = _('rolling back unknown transaction\n')
984 984 desc = None
985 985
986 986 if not force and self['.'] != self['tip'] and desc == 'commit':
987 987 raise util.Abort(
988 988 _('rollback of last commit while not checked out '
989 989 'may lose data'), hint=_('use -f to force'))
990 990
991 991 ui.status(msg)
992 992 if dryrun:
993 993 return 0
994 994
995 995 parents = self.dirstate.parents()
996 996 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
997 997 if os.path.exists(self.join('undo.bookmarks')):
998 998 util.rename(self.join('undo.bookmarks'),
999 999 self.join('bookmarks'))
1000 1000 if os.path.exists(self.sjoin('undo.phaseroots')):
1001 1001 util.rename(self.sjoin('undo.phaseroots'),
1002 1002 self.sjoin('phaseroots'))
1003 1003 self.invalidate()
1004 1004
1005 1005 # Discard all cache entries to force reloading everything.
1006 1006 self._filecache.clear()
1007 1007
1008 1008 parentgone = (parents[0] not in self.changelog.nodemap or
1009 1009 parents[1] not in self.changelog.nodemap)
1010 1010 if parentgone:
1011 1011 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1012 1012 try:
1013 1013 branch = self.opener.read('undo.branch')
1014 1014 self.dirstate.setbranch(encoding.tolocal(branch))
1015 1015 except IOError:
1016 1016 ui.warn(_('named branch could not be reset: '
1017 1017 'current branch is still \'%s\'\n')
1018 1018 % self.dirstate.branch())
1019 1019
1020 1020 self.dirstate.invalidate()
1021 1021 parents = tuple([p.rev() for p in self.parents()])
1022 1022 if len(parents) > 1:
1023 1023 ui.status(_('working directory now based on '
1024 1024 'revisions %d and %d\n') % parents)
1025 1025 else:
1026 1026 ui.status(_('working directory now based on '
1027 1027 'revision %d\n') % parents)
1028 1028 # TODO: if we know which new heads may result from this rollback, pass
1029 1029 # them to destroy(), which will prevent the branchhead cache from being
1030 1030 # invalidated.
1031 1031 self.destroyed()
1032 1032 return 0
1033 1033
1034 1034 def invalidatecaches(self):
1035 1035 def delcache(name):
1036 1036 try:
1037 1037 delattr(self, name)
1038 1038 except AttributeError:
1039 1039 pass
1040 1040
1041 1041 delcache('_tagscache')
1042 1042
1043 1043 self._branchcache = None # in UTF-8
1044 1044 self._branchcachetip = None
1045 1045 obsolete.clearobscaches(self)
1046 1046
1047 1047 def invalidatedirstate(self):
1048 1048 '''Invalidates the dirstate, causing the next call to dirstate
1049 1049 to check if it was modified since the last time it was read,
1050 1050 rereading it if it has.
1051 1051
1052 1052 This is different from dirstate.invalidate() in that it doesn't
1053 1053 always reread the dirstate. Use dirstate.invalidate() if you want to
1054 1054 explicitly read the dirstate again (i.e. restoring it to a previous
1055 1055 known good state).'''
1056 1056 if 'dirstate' in self.__dict__:
1057 1057 for k in self.dirstate._filecache:
1058 1058 try:
1059 1059 delattr(self.dirstate, k)
1060 1060 except AttributeError:
1061 1061 pass
1062 1062 delattr(self, 'dirstate')
1063 1063
1064 1064 def invalidate(self):
1065 1065 for k in self._filecache:
1066 1066 # dirstate is invalidated separately in invalidatedirstate()
1067 1067 if k == 'dirstate':
1068 1068 continue
1069 1069
1070 1070 try:
1071 1071 delattr(self, k)
1072 1072 except AttributeError:
1073 1073 pass
1074 1074 self.invalidatecaches()
1075 1075
1076 1076 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1077 1077 try:
1078 1078 l = lock.lock(lockname, 0, releasefn, desc=desc)
1079 1079 except error.LockHeld, inst:
1080 1080 if not wait:
1081 1081 raise
1082 1082 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1083 1083 (desc, inst.locker))
1084 1084 # default to 600 seconds timeout
1085 1085 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1086 1086 releasefn, desc=desc)
1087 1087 if acquirefn:
1088 1088 acquirefn()
1089 1089 return l
1090 1090
1091 1091 def _afterlock(self, callback):
1092 1092 """add a callback to the current repository lock.
1093 1093
1094 1094 The callback will be executed on lock release."""
1095 1095 l = self._lockref and self._lockref()
1096 1096 if l:
1097 1097 l.postrelease.append(callback)
1098 1098 else:
1099 1099 callback()
1100 1100
1101 1101 def lock(self, wait=True):
1102 1102 '''Lock the repository store (.hg/store) and return a weak reference
1103 1103 to the lock. Use this before modifying the store (e.g. committing or
1104 1104 stripping). If you are opening a transaction, get a lock as well.'''
1105 1105 l = self._lockref and self._lockref()
1106 1106 if l is not None and l.held:
1107 1107 l.lock()
1108 1108 return l
1109 1109
1110 1110 def unlock():
1111 1111 self.store.write()
1112 1112 if '_phasecache' in vars(self):
1113 1113 self._phasecache.write()
1114 1114 for k, ce in self._filecache.items():
1115 1115 if k == 'dirstate':
1116 1116 continue
1117 1117 ce.refresh()
1118 1118
1119 1119 l = self._lock(self.sjoin("lock"), wait, unlock,
1120 1120 self.invalidate, _('repository %s') % self.origroot)
1121 1121 self._lockref = weakref.ref(l)
1122 1122 return l
1123 1123
1124 1124 def wlock(self, wait=True):
1125 1125 '''Lock the non-store parts of the repository (everything under
1126 1126 .hg except .hg/store) and return a weak reference to the lock.
1127 1127 Use this before modifying files in .hg.'''
1128 1128 l = self._wlockref and self._wlockref()
1129 1129 if l is not None and l.held:
1130 1130 l.lock()
1131 1131 return l
1132 1132
1133 1133 def unlock():
1134 1134 self.dirstate.write()
1135 1135 ce = self._filecache.get('dirstate')
1136 1136 if ce:
1137 1137 ce.refresh()
1138 1138
1139 1139 l = self._lock(self.join("wlock"), wait, unlock,
1140 1140 self.invalidatedirstate, _('working directory of %s') %
1141 1141 self.origroot)
1142 1142 self._wlockref = weakref.ref(l)
1143 1143 return l
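# A sketch of the lock ordering used throughout this file (compare
# rollback() above): take wlock before lock and release in reverse
# order, so the store lock is never held while waiting on wlock:
#
#   wlock = lock = None
#   try:
#       wlock = repo.wlock()
#       lock = repo.lock()
#       # ... modify the store and the working directory ...
#   finally:
#       release(lock, wlock)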
1144 1144
1145 1145 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1146 1146 """
1147 1147 commit an individual file as part of a larger transaction
1148 1148 """
1149 1149
1150 1150 fname = fctx.path()
1151 1151 text = fctx.data()
1152 1152 flog = self.file(fname)
1153 1153 fparent1 = manifest1.get(fname, nullid)
1154 1154 fparent2 = fparent2o = manifest2.get(fname, nullid)
1155 1155
1156 1156 meta = {}
1157 1157 copy = fctx.renamed()
1158 1158 if copy and copy[0] != fname:
1159 1159 # Mark the new revision of this file as a copy of another
1160 1160 # file. This copy data will effectively act as a parent
1161 1161 # of this new revision. If this is a merge, the first
1162 1162 # parent will be the nullid (meaning "look up the copy data")
1163 1163 # and the second one will be the other parent. For example:
1164 1164 #
1165 1165 # 0 --- 1 --- 3 rev1 changes file foo
1166 1166 # \ / rev2 renames foo to bar and changes it
1167 1167 # \- 2 -/ rev3 should have bar with all changes and
1168 1168 # should record that bar descends from
1169 1169 # bar in rev2 and foo in rev1
1170 1170 #
1171 1171 # this allows this merge to succeed:
1172 1172 #
1173 1173 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1174 1174 # \ / merging rev3 and rev4 should use bar@rev2
1175 1175 # \- 2 --- 4 as the merge base
1176 1176 #
1177 1177
1178 1178 cfname = copy[0]
1179 1179 crev = manifest1.get(cfname)
1180 1180 newfparent = fparent2
1181 1181
1182 1182 if manifest2: # branch merge
1183 1183 if fparent2 == nullid or crev is None: # copied on remote side
1184 1184 if cfname in manifest2:
1185 1185 crev = manifest2[cfname]
1186 1186 newfparent = fparent1
1187 1187
1188 1188 # find source in nearest ancestor if we've lost track
1189 1189 if not crev:
1190 1190 self.ui.debug(" %s: searching for copy revision for %s\n" %
1191 1191 (fname, cfname))
1192 1192 for ancestor in self[None].ancestors():
1193 1193 if cfname in ancestor:
1194 1194 crev = ancestor[cfname].filenode()
1195 1195 break
1196 1196
1197 1197 if crev:
1198 1198 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1199 1199 meta["copy"] = cfname
1200 1200 meta["copyrev"] = hex(crev)
1201 1201 fparent1, fparent2 = nullid, newfparent
1202 1202 else:
1203 1203 self.ui.warn(_("warning: can't find ancestor for '%s' "
1204 1204 "copied from '%s'!\n") % (fname, cfname))
1205 1205
1206 1206 elif fparent2 != nullid:
1207 1207 # is one parent an ancestor of the other?
1208 1208 fparentancestor = flog.ancestor(fparent1, fparent2)
1209 1209 if fparentancestor == fparent1:
1210 1210 fparent1, fparent2 = fparent2, nullid
1211 1211 elif fparentancestor == fparent2:
1212 1212 fparent2 = nullid
1213 1213
1214 1214 # is the file changed?
1215 1215 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1216 1216 changelist.append(fname)
1217 1217 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1218 1218
1219 1219 # are just the flags changed during merge?
1220 1220 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1221 1221 changelist.append(fname)
1222 1222
1223 1223 return fparent1
1224 1224
1225 1225 def commit(self, text="", user=None, date=None, match=None, force=False,
1226 1226 editor=False, extra={}):
1227 1227 """Add a new revision to current repository.
1228 1228
1229 1229 Revision information is gathered from the working directory,
1230 1230 match can be used to filter the committed files. If editor is
1231 1231 supplied, it is called to get a commit message.
1232 1232 """
1233 1233
1234 1234 def fail(f, msg):
1235 1235 raise util.Abort('%s: %s' % (f, msg))
1236 1236
1237 1237 if not match:
1238 1238 match = matchmod.always(self.root, '')
1239 1239
1240 1240 if not force:
1241 1241 vdirs = []
1242 1242 match.dir = vdirs.append
1243 1243 match.bad = fail
1244 1244
1245 1245 wlock = self.wlock()
1246 1246 try:
1247 1247 wctx = self[None]
1248 1248 merge = len(wctx.parents()) > 1
1249 1249
1250 1250 if (not force and merge and match and
1251 1251 (match.files() or match.anypats())):
1252 1252 raise util.Abort(_('cannot partially commit a merge '
1253 1253 '(do not specify files or patterns)'))
1254 1254
1255 1255 changes = self.status(match=match, clean=force)
1256 1256 if force:
1257 1257 changes[0].extend(changes[6]) # mq may commit unchanged files
1258 1258
1259 1259 # check subrepos
1260 1260 subs = []
1261 1261 commitsubs = set()
1262 1262 newstate = wctx.substate.copy()
1263 1263 # only manage subrepos and .hgsubstate if .hgsub is present
1264 1264 if '.hgsub' in wctx:
1265 1265 # we'll decide whether to track this ourselves, thanks
1266 1266 if '.hgsubstate' in changes[0]:
1267 1267 changes[0].remove('.hgsubstate')
1268 1268 if '.hgsubstate' in changes[2]:
1269 1269 changes[2].remove('.hgsubstate')
1270 1270
1271 1271 # compare current state to last committed state
1272 1272 # build new substate based on last committed state
1273 1273 oldstate = wctx.p1().substate
1274 1274 for s in sorted(newstate.keys()):
1275 1275 if not match(s):
1276 1276 # ignore working copy, use old state if present
1277 1277 if s in oldstate:
1278 1278 newstate[s] = oldstate[s]
1279 1279 continue
1280 1280 if not force:
1281 1281 raise util.Abort(
1282 1282 _("commit with new subrepo %s excluded") % s)
1283 1283 if wctx.sub(s).dirty(True):
1284 1284 if not self.ui.configbool('ui', 'commitsubrepos'):
1285 1285 raise util.Abort(
1286 1286 _("uncommitted changes in subrepo %s") % s,
1287 1287 hint=_("use --subrepos for recursive commit"))
1288 1288 subs.append(s)
1289 1289 commitsubs.add(s)
1290 1290 else:
1291 1291 bs = wctx.sub(s).basestate()
1292 1292 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1293 1293 if oldstate.get(s, (None, None, None))[1] != bs:
1294 1294 subs.append(s)
1295 1295
1296 1296 # check for removed subrepos
1297 1297 for p in wctx.parents():
1298 1298 r = [s for s in p.substate if s not in newstate]
1299 1299 subs += [s for s in r if match(s)]
1300 1300 if subs:
1301 1301 if (not match('.hgsub') and
1302 1302 '.hgsub' in (wctx.modified() + wctx.added())):
1303 1303 raise util.Abort(
1304 1304 _("can't commit subrepos without .hgsub"))
1305 1305 changes[0].insert(0, '.hgsubstate')
1306 1306
1307 1307 elif '.hgsub' in changes[2]:
1308 1308 # clean up .hgsubstate when .hgsub is removed
1309 1309 if ('.hgsubstate' in wctx and
1310 1310 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1311 1311 changes[2].insert(0, '.hgsubstate')
1312 1312
1313 1313 # make sure all explicit patterns are matched
1314 1314 if not force and match.files():
1315 1315 matched = set(changes[0] + changes[1] + changes[2])
1316 1316
1317 1317 for f in match.files():
1318 1318 f = self.dirstate.normalize(f)
1319 1319 if f == '.' or f in matched or f in wctx.substate:
1320 1320 continue
1321 1321 if f in changes[3]: # missing
1322 1322 fail(f, _('file not found!'))
1323 1323 if f in vdirs: # visited directory
1324 1324 d = f + '/'
1325 1325 for mf in matched:
1326 1326 if mf.startswith(d):
1327 1327 break
1328 1328 else:
1329 1329 fail(f, _("no match under directory!"))
1330 1330 elif f not in self.dirstate:
1331 1331 fail(f, _("file not tracked!"))
1332 1332
1333 1333 if (not force and not extra.get("close") and not merge
1334 1334 and not (changes[0] or changes[1] or changes[2])
1335 1335 and wctx.branch() == wctx.p1().branch()):
1336 1336 return None
1337 1337
1338 1338 if merge and changes[3]:
1339 1339 raise util.Abort(_("cannot commit merge with missing files"))
1340 1340
1341 1341 ms = mergemod.mergestate(self)
1342 1342 for f in changes[0]:
1343 1343 if f in ms and ms[f] == 'u':
1344 1344 raise util.Abort(_("unresolved merge conflicts "
1345 1345 "(see hg help resolve)"))
1346 1346
1347 1347 cctx = context.workingctx(self, text, user, date, extra, changes)
1348 1348 if editor:
1349 1349 cctx._text = editor(self, cctx, subs)
1350 1350 edited = (text != cctx._text)
1351 1351
1352 1352 # commit subs and write new state
1353 1353 if subs:
1354 1354 for s in sorted(commitsubs):
1355 1355 sub = wctx.sub(s)
1356 1356 self.ui.status(_('committing subrepository %s\n') %
1357 1357 subrepo.subrelpath(sub))
1358 1358 sr = sub.commit(cctx._text, user, date)
1359 1359 newstate[s] = (newstate[s][0], sr)
1360 1360 subrepo.writestate(self, newstate)
1361 1361
1362 1362 # Save commit message in case this transaction gets rolled back
1363 1363 # (e.g. by a pretxncommit hook). Leave the content alone on
1364 1364 # the assumption that the user will use the same editor again.
1365 1365 msgfn = self.savecommitmessage(cctx._text)
1366 1366
1367 1367 p1, p2 = self.dirstate.parents()
1368 1368 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1369 1369 try:
1370 1370 self.hook("precommit", throw=True, parent1=hookp1,
1371 1371 parent2=hookp2)
1372 1372 ret = self.commitctx(cctx, True)
1373 1373 except: # re-raises
1374 1374 if edited:
1375 1375 self.ui.write(
1376 1376 _('note: commit message saved in %s\n') % msgfn)
1377 1377 raise
1378 1378
1379 1379 # update bookmarks, dirstate and mergestate
1380 1380 bookmarks.update(self, [p1, p2], ret)
1381 1381 for f in changes[0] + changes[1]:
1382 1382 self.dirstate.normal(f)
1383 1383 for f in changes[2]:
1384 1384 self.dirstate.drop(f)
1385 1385 self.dirstate.setparents(ret)
1386 1386 ms.reset()
1387 1387 finally:
1388 1388 wlock.release()
1389 1389
1390 1390 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1391 1391 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1392 1392 self._afterlock(commithook)
1393 1393 return ret
1394 1394
1395 1395 def commitctx(self, ctx, error=False):
1396 1396 """Add a new revision to current repository.
1397 1397 Revision information is passed via the context argument.
1398 1398 """
1399 1399
1400 1400 tr = lock = None
1401 1401 removed = list(ctx.removed())
1402 1402 p1, p2 = ctx.p1(), ctx.p2()
1403 1403 user = ctx.user()
1404 1404
1405 1405 lock = self.lock()
1406 1406 try:
1407 1407 tr = self.transaction("commit")
1408 1408 trp = weakref.proxy(tr)
1409 1409
1410 1410 if ctx.files():
1411 1411 m1 = p1.manifest().copy()
1412 1412 m2 = p2.manifest()
1413 1413
1414 1414 # check in files
1415 1415 new = {}
1416 1416 changed = []
1417 1417 linkrev = len(self)
1418 1418 for f in sorted(ctx.modified() + ctx.added()):
1419 1419 self.ui.note(f + "\n")
1420 1420 try:
1421 1421 fctx = ctx[f]
1422 1422 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1423 1423 changed)
1424 1424 m1.set(f, fctx.flags())
1425 1425 except OSError, inst:
1426 1426 self.ui.warn(_("trouble committing %s!\n") % f)
1427 1427 raise
1428 1428 except IOError, inst:
1429 1429 errcode = getattr(inst, 'errno', errno.ENOENT)
1430 1430 if error or errcode and errcode != errno.ENOENT:
1431 1431 self.ui.warn(_("trouble committing %s!\n") % f)
1432 1432 raise
1433 1433 else:
1434 1434 removed.append(f)
1435 1435
1436 1436 # update manifest
1437 1437 m1.update(new)
1438 1438 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1439 1439 drop = [f for f in removed if f in m1]
1440 1440 for f in drop:
1441 1441 del m1[f]
1442 1442 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1443 1443 p2.manifestnode(), (new, drop))
1444 1444 files = changed + removed
1445 1445 else:
1446 1446 mn = p1.manifestnode()
1447 1447 files = []
1448 1448
1449 1449 # update changelog
1450 1450 self.changelog.delayupdate()
1451 1451 n = self.changelog.add(mn, files, ctx.description(),
1452 1452 trp, p1.node(), p2.node(),
1453 1453 user, ctx.date(), ctx.extra().copy())
1454 1454 p = lambda: self.changelog.writepending() and self.root or ""
1455 1455 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1456 1456 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1457 1457 parent2=xp2, pending=p)
1458 1458 self.changelog.finalize(trp)
1459 1459 # set the new commit in its proper phase
1460 1460 targetphase = phases.newcommitphase(self.ui)
1461 1461 if targetphase:
1462 1462 # retracting the boundary does not alter parent changesets.
1463 1463 # if a parent has a higher phase, the resulting phase will
1464 1464 # be compliant anyway
1465 1465 #
1466 1466 # if minimal phase was 0 we don't need to retract anything
1467 1467 phases.retractboundary(self, targetphase, [n])
1468 1468 tr.close()
1469 1469 self.updatebranchcache()
1470 1470 return n
1471 1471 finally:
1472 1472 if tr:
1473 1473 tr.release()
1474 1474 lock.release()
1475 1475
1476 1476 def destroyed(self, newheadnodes=None):
1477 1477 '''Inform the repository that nodes have been destroyed.
1478 1478 Intended for use by strip and rollback, so there's a common
1479 1479 place for anything that has to be done after destroying history.
1480 1480
1481 1481 If you know the branchheadcache was up to date before nodes were removed
1482 1482 and you also know the set of candidate new heads that may have resulted
1483 1483 from the destruction, you can set newheadnodes. This will enable the
1484 1484 code to update the branchheads cache, rather than having future code
1485 1485 decide it's invalid and regenerate it from scratch.
1486 1486 '''
1487 1487 # If we have info, newheadnodes, on how to update the branch cache, do
1488 1488 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1489 1489 # will be caught the next time it is read.
1490 1490 if newheadnodes:
1491 1491 tiprev = len(self) - 1
1492 1492 ctxgen = (self[node] for node in newheadnodes
1493 1493 if self.changelog.hasnode(node))
1494 1494 self._updatebranchcache(self._branchcache, ctxgen)
1495 1495 self._writebranchcache(self._branchcache, self.changelog.tip(),
1496 1496 tiprev)
1497 1497
1498 1498 # Ensure the persistent tag cache is updated. Doing it now
1499 1499 # means that the tag cache only has to worry about destroyed
1500 1500 # heads immediately after a strip/rollback. That in turn
1501 1501 # guarantees that "cachetip == currenttip" (comparing both rev
1502 1502 # and node) always means no nodes have been added or destroyed.
1503 1503
1504 1504 # XXX this is suboptimal when qrefresh'ing: we strip the current
1505 1505 # head, refresh the tag cache, then immediately add a new head.
1506 1506 # But I think doing it this way is necessary for the "instant
1507 1507 # tag cache retrieval" case to work.
1508 1508 self.invalidatecaches()
1509 1509
1510 1510 # Discard all cache entries to force reloading everything.
1511 1511 self._filecache.clear()
1512 1512
1513 1513 def walk(self, match, node=None):
1514 1514 '''
1515 1515 walk recursively through the directory tree or a given
1516 1516 changeset, finding all files matched by the match
1517 1517 function
1518 1518 '''
1519 1519 return self[node].walk(match)
1520 1520
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

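    # A minimal usage sketch (not part of the original file): comparing the
    # working directory against its first parent; 'repo' is assumed to be
    # an existing localrepository instance obtained elsewhere.
    #
    #   st = repo.status(ignored=True, clean=True, unknown=True)
    #   modified, added, removed, deleted, unknown, ignored, clean = st
    #   for f in modified:
    #       repo.ui.write("M %s\n" % f)
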
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

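    # A minimal usage sketch (not part of the original file): finding the
    # newest open head of the branch the working directory is on.
    #
    #   bheads = repo.branchheads()      # dirstate branch, open heads only
    #   if bheads:
    #       tiphead = repo[bheads[0]]    # newest head comes first
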
    def branches(self, nodes):
        # for each node, follow the chain of first parents while the node
        # has a single parent and is not a root; record the start node, the
        # node where the walk stopped, and that node's parents (legacy wire
        # protocol helper)
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, walk the first-parent chain down from
        # top and collect the nodes at distances 1, 2, 4, 8, ... (used by
        # the legacy discovery protocol to bisect over unknown ranges)
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

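    # Illustration (not part of the original file): because the sampling
    # distance doubles each step (1, 2, 4, 8, ...), a linear chain of k
    # changesets between top and bottom is summarized by about log2(k)
    # nodes, which keeps the legacy discovery exchange small.
    #
    #   segments = repo.between([(topnode, bottomnode)])
    #   # segments[0] lists nodes at distances 1, 2, 4, ... below topnode
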
    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing, or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

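    # A minimal usage sketch (not part of the original file): pulling
    # everything from another repository; hg.peer() is the usual way to
    # obtain a peer, and 'path' is assumed to come from the caller.
    #
    #   from mercurial import hg
    #   other = hg.peer(repo.ui, {}, path)
    #   result = repo.pull(other)    # 0 if no changes were found
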
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore is empty (false), there are no
                        # obsolete markers, so skip the iteration
                        if self.obsstore:
                            # these messages are defined here because of the
                            # 80-character line limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If we are pushing and there is at least one
                            # obsolete or unstable changeset in missing, then
                            # at least one of the missing heads will be
                            # obsolete or unstable. So checking heads only
                            # is ok.
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(msu % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # all-out push failed; synchronize all common heads
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered
                    # out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents of missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads,
                                                     remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote but public
                    # here.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if bookmarks.validdest(self, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

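    # A minimal usage sketch (not part of the original file): pushing to a
    # peer and interpreting the result per the docstring above; 'other' is
    # assumed to be a peer obtained via hg.peer().
    #
    #   ret = repo.push(other, newbranch=False)
    #   if ret is None:
    #       repo.ui.status("nothing to push\n")
    #   elif ret == 0:
    #       repo.ui.warn("push failed over HTTP\n")
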
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

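    # A minimal usage sketch (not part of the original file): bundling
    # everything the local repo has beyond a set of common nodes;
    # 'commonnodes' is a hypothetical list produced by discovery.
    #
    #   cg = repo.getbundle('push', heads=None, common=commonnodes)
    #   # cg is None when nothing is missing, otherwise an object whose
    #   # read() method yields successive changegroup chunks
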
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            obsolete.clearobscaches(self)

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if a changeset already
                # existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch the boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

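    # Illustration (not part of the original file): decoding the return
    # value described in the docstring above. Since 0 is never returned for
    # a non-empty source, the sign carries the head-count change.
    #
    #   ret = repo.addchangegroup(cg, 'pull', url)
    #   if ret > 1:
    #       pass  # ret - 1 new heads appeared
    #   elif ret < 0:
    #       pass  # -ret - 1 heads disappeared
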
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

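    # A minimal usage sketch (not part of the original file): cloning with a
    # preference for streaming; this falls back to pull automatically when
    # the server lacks the 'stream'/'streamreqs' capabilities or when
    # specific heads are requested.
    #
    #   repo.clone(other, heads=[], stream=True)
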
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

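    # A minimal usage sketch (not part of the original file): several
    # features are exchanged through pushkey namespaces, e.g. bookmarks.
    # 'newhexnode' is a hypothetical new value supplied by the caller.
    #
    #   marks = repo.listkeys('bookmarks')   # {name: hex node}
    #   ok = repo.pushkey('bookmarks', 'stable',
    #                     marks.get('stable', ''), newhexnode)
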
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

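# Illustration (not part of the original file): aftertrans() builds the
# post-transaction callback that renames journal files to their undo
# counterparts; the paths below are hypothetical.
#
#   renames = [(repo.sjoin('journal'), repo.sjoin('undo'))]
#   tr = transaction.transaction(repo.ui.warn, repo.sopener,
#                                repo.sjoin('journal'),
#                                aftertrans(renames))
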
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True