##// END OF EJS Templates
en-us: initialization
timeless@mozdev.org -
r17532:e4b2f0eb default
parent child Browse files
Show More
@@ -1,2605 +1,2605 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
class storecache(filecache):
    """filecache variant for files that live in the store (.hg/store).

    Overrides join() so cached-file paths resolve via the repo's sjoin()
    (store-relative) rather than the plain .hg directory.
    """
    def join(self, obj, fname):
        # obj is the repository owning the cached property
        return obj.sjoin(fname)
25 25
# capabilities advertised by a modern local peer
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
# legacy peers additionally expose the old changegroupsubset call
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        # let the repository veto capabilities it cannot honour
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer can hand out the real repository object
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        # restricted to branch heads visible through obsolescence filtering
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        # delegate the legacy wire call to the underlying repository
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)
107 107
class localrepository(object):

    # on-disk revlog formats this class can read and write
    supportedformats = set(('revlogv1', 'generaldelta'))
    # every requirement this class understands: formats plus layout flags
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    # requirements that translate into store-opener options
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        # return a copy so callers may append without mutating the class attr
        return self.requirements[:]
118 118
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at path; with create=True, initialize a new
        repository there first.

        Raises error.RepoError if the repo is missing (create=False) or
        already exists (create=True).
        """
        # working-directory opener/vfs and derived paths
        self.wopener = scmutil.opener(path, expand=True)
        self.wvfs = self.wopener
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        # .hg metadata opener/vfs
        self.opener = scmutil.opener(self.path)
        self.vfs = self.opener
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc: run with the inherited configuration only
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: an old, requirement-less repository
                requirements = set()

        self.sharedpath = self.path
        try:
            # honour .hg/sharedpath indirection (shared repositories)
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.svfs = self.sopener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        # branch-head cache state, filled lazily by updatebranchcache()
        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
207 207
    def close(self):
        """Release peer resources; a local repository holds none."""
        pass

    def _restrictcapabilities(self, caps):
        # hook point: subclasses/extensions may trim the advertised caps
        return caps
213 213
    def _applyrequirements(self, requirements):
        """Record requirements and derive store-opener options from them."""
        self.requirements = requirements
        # only requirements listed in openerreqs become revlog opener options
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
224 224
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path is an absolute filesystem path.  Returns True only when it
        lies under the repo root and corresponds to a subrepository entry
        of the working copy (delegating to that subrepo for deeper paths).
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path is inside a subrepo; let it judge the remainder
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter prefix
                parts.pop()
        return False
262 262
    def peer(self):
        """Return a localpeer view of this repo (a new object every call)."""
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        # mapping of bookmark name -> node, read from .hg/bookmarks
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark, read from .hg/bookmarks.current
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        # NOTE(review): 'marks' is unused; bookmarks.write reads repo state
        bookmarks.write(self)
276 276
277 277 def bookmarkheads(self, bookmark):
278 278 name = bookmark.split('@', 1)[0]
279 279 heads = []
280 280 for mark, n in self._bookmarks.iteritems():
281 281 if mark.split('@', 1)[0] == name:
282 282 heads.append(n)
283 283 return heads
284 284
    @storecache('phaseroots')
    def _phasecache(self):
        # phase data, lazily loaded/invalidated via .hg/store/phaseroots
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        """Obsolescence-marker store, lazily loaded from the repo store."""
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store
297 297
    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by command and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that the hiddenrevs will need invalidation when
        - a new changeset is added (possible unstable above extinct)
        - a new obsolete marker is added (possible new extinct changeset)
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
            hidden.update(self.revs(hiddenquery))
        return hidden
316 316
    @storecache('00changelog.i')
    def changelog(self):
        """The changelog; includes pending (in-transaction) revisions when
        HG_PENDING points inside this repository root."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        # the manifest revlog, opened lazily from the store
        return manifest.manifest(self.sopener)
329 329
    @filecache('dirstate')
    def dirstate(self):
        """The dirstate, with a validate hook that maps working-directory
        parents unknown to the changelog onto nullid (warning only once)."""
        warned = [0]  # mutable cell so the closure can record the warning
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
345 345
    def __getitem__(self, changeid):
        """repo[changeid] -> changectx; repo[None] is the working context."""
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        # True when changeid resolves to a changeset in this repository
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions in the repository
        return len(self.changelog)

    def __iter__(self):
        # iterate over revision numbers 0 .. len(self)-1
        for i in xrange(len(self)):
            yield i
366 366
367 367 def revs(self, expr, *args):
368 368 '''Return a list of revisions matching the given revset'''
369 369 expr = revset.formatspec(expr, *args)
370 370 m = revset.match(None, expr)
371 371 return [r for r in m(self, range(len(self)))]
372 372
373 373 def set(self, expr, *args):
374 374 '''
375 375 Yield a context for each matching revision, after doing arg
376 376 replacement via revset.formatspec
377 377 '''
378 378 for r in self.revs(expr, *args):
379 379 yield self[r]
380 380
    def url(self):
        # local repositories are addressed with a plain file: URL
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Run the named hook; with throw=True a failing hook aborts."""
        return hook.hook(self.ui, self, name, throw, **args)

    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'
388 388
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tagging: validate names, fire hooks, write the tag
        file(s) and, for global tags, commit the .hgtags change.  Prefer
        tag(), which also checks the working-copy state of .hgtags.

        NOTE(review): the mutable default 'extra' is shared across calls;
        it is only passed through to commit(), never mutated here.
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append "<hex node> <name>" lines at the end of fp; when the
            # tag already has a type, first record its old node so tag
            # history is preserved
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
461 461
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag globally while .hgtags has uncommitted changes
            # in any of the first five status categories
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
491 491
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by tagslist()/nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
514 514
515 515 def tags(self):
516 516 '''return a mapping of tag to node'''
517 517 t = {}
518 518 for k, v in self._tagscache.tags.iteritems():
519 519 try:
520 520 # ignore tags to unknown nodes
521 521 self.changelog.rev(v)
522 522 t[k] = v
523 523 except (error.LookupError, ValueError):
524 524 pass
525 525 return t
526 526
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
559 559
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
570 570
571 571 def tagslist(self):
572 572 '''return a list of tags ordered by revision'''
573 573 if not self._tagscache.tagslist:
574 574 l = []
575 575 for t, n in self.tags().iteritems():
576 576 r = self.changelog.rev(n)
577 577 l.append((r, t, n))
578 578 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
579 579
580 580 return self._tagscache.tagslist
581 581
582 582 def nodetags(self, node):
583 583 '''return the tags associated with a node'''
584 584 if not self._tagscache.nodetagscache:
585 585 nodetagscache = {}
586 586 for t, n in self._tagscache.tags.iteritems():
587 587 nodetagscache.setdefault(n, []).append(t)
588 588 for tags in nodetagscache.itervalues():
589 589 tags.sort()
590 590 self._tagscache.nodetagscache = nodetagscache
591 591 return self._tagscache.nodetagscache.get(node, [])
592 592
593 593 def nodebookmarks(self, node):
594 594 marks = []
595 595 for bookmark, n in self._bookmarks.iteritems():
596 596 if n == node:
597 597 marks.append(bookmark)
598 598 return sorted(marks)
599 599
    def _branchtags(self, partial, lrev):
        """Bring the branch-head cache 'partial' (valid up to rev lrev) up
        to the current tip, persisting the refreshed cache to disk."""
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        """Ensure the in-memory branch-head cache matches the current tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return  # cache already current

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable previous state (e.g. after a strip): reload from disk
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache
631 631
632 632 def _branchtip(self, heads):
633 633 '''return the tipmost branch head in heads'''
634 634 tip = heads[-1]
635 635 for h in reversed(heads):
636 636 if not self[h].closesbranch():
637 637 tip = h
638 638 break
639 639 return tip
640 640
    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt
654 654
655 655 def _readbranchcache(self):
656 656 partial = {}
657 657 try:
658 658 f = self.opener("cache/branchheads")
659 659 lines = f.read().split('\n')
660 660 f.close()
661 661 except (IOError, OSError):
662 662 return {}, nullid, nullrev
663 663
664 664 try:
665 665 last, lrev = lines.pop(0).split(" ", 1)
666 666 last, lrev = bin(last), int(lrev)
667 667 if lrev >= len(self) or self[lrev].node() != last:
668 668 # invalidate the cache
669 669 raise ValueError('invalidating branch cache (tip differs)')
670 670 for l in lines:
671 671 if not l:
672 672 continue
673 673 node, label = l.split(" ", 1)
674 674 label = encoding.tolocal(label.strip())
675 675 if not node in self:
676 676 raise ValueError('invalidating branch cache because node '+
677 677 '%s does not exist' % node)
678 678 partial.setdefault(label, []).append(bin(node))
679 679 except KeyboardInterrupt:
680 680 raise
681 681 except Exception, inst:
682 682 if self.ui.debugflag:
683 683 self.ui.warn(str(inst), '\n')
684 684 partial, last, lrev = {}, nullid, nullrev
685 685 return partial, last, lrev
686 686
    def _writebranchcache(self, branches, tip, tiprev):
        """Write the branch-head cache to .hg/cache/branchheads.

        Best-effort: I/O errors are deliberately swallowed because the
        cache can always be rebuilt from the changelog.
        """
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            # header line records the tip the cache is valid for
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
697 697
    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset of
        heads missing, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]
757 757
    def lookup(self, key):
        """Resolve key (rev number, hash prefix, tag, bookmark, ...) to a
        changelog node."""
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        """Return the branch named by key: key itself when it is a branch
        name (checked against remote's branchmap when given), otherwise the
        branch of the changeset key resolves to."""
        repo = remote or self
        if key in repo.branchmap():
            return key

        # only resolve revisions in the remote when it is actually local;
        # (x and y) or z is the pre-2.5 conditional-expression idiom
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
768 768
769 769 def known(self, nodes):
770 770 nm = self.changelog.nodemap
771 771 pc = self._phasecache
772 772 result = []
773 773 for n in nodes:
774 774 r = nm.get(n)
775 775 resp = not (r is None or pc.phase(self, r) >= phases.secret)
776 776 result.append(resp)
777 777 return result
778 778
    def local(self):
        # a local repository returns itself here (remote peers do not)
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        """Join f onto the .hg directory path."""
        return os.path.join(self.path, f)

    def wjoin(self, f):
        """Join f onto the working directory root."""
        return os.path.join(self.root, f)

    def file(self, f):
        """Return the filelog for tracked file f (leading '/' tolerated)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        # method form of repo[changeid]
        return self[changeid]
798 798
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        """Set the working directory parents, preserving applicable copy
        records."""
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
813 813
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # current working directory, relative to the repo root
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # render repo-relative path f for display relative to cwd
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        """Open file f from the working directory."""
        return self.wopener(f, mode)

    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
830 830
    def _loadfilter(self, filter):
        """Load and cache the filter patterns for the named config section
        ('encode' or 'decode' in practice).

        Returns a list of (matcher, filterfn, params) tuples.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables any filter for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered in-process filter; the rest of the
                        # command string becomes its parameters
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to running cmd as an external pipe filter
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
854 854
    def _filter(self, filterpats, filename, data):
        # apply the first filter whose pattern matches filename; at most
        # one filter runs per file
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        # [encode] filters, applied when reading from the working directory
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        # [decode] filters, applied when writing to the working directory
        return self._loadfilter('decode')
871 871
    def adddatafilter(self, name, filter):
        """Register an in-process data filter usable from [encode]/[decode]."""
        self._datafilters[name] = filter

    def wread(self, filename):
        """Read filename from the working directory with encode filters
        applied; for a symlink, the data is the link target."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)
881 881
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying decode
        filters and honoring flags: 'l' writes a symlink, 'x' sets the
        executable bit."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        # run decode filters only; does not touch the working directory
        return self._filter(self._decodefilterpats, filename, data)
893 893
    def transaction(self, desc):
        """Open a new transaction, or nest into the currently running one.

        desc is recorded in the journal (see _writejournal) and shows up
        in recovery/rollback messages. Returns a transaction object.
        """
        # reuse a still-running transaction by nesting into it
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot current state, then schedule journal->undo renames
        # that aftertrans performs when the transaction closes
        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # hold only a weak reference so the repo does not keep a
        # finished transaction alive
        self._transref = weakref.ref(tr)
        return tr
913 913
914 914 def _journalfiles(self):
915 915 return (self.sjoin('journal'), self.join('journal.dirstate'),
916 916 self.join('journal.branch'), self.join('journal.desc'),
917 917 self.join('journal.bookmarks'),
918 918 self.sjoin('journal.phaseroots'))
919 919
920 920 def undofiles(self):
921 921 return [undoname(x) for x in self._journalfiles()]
922 922
    def _writejournal(self, desc):
        """Snapshot repo state into journal.* files so that an
        interrupted transaction can be recovered or rolled back."""
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # record the current changelog length and the transaction desc
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                          self.sopener.tryread("phaseroots"))
934 934
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        otherwise. Takes the store lock for the duration.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop caches so reloaded data reflects the rolled-back store
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
949 949
950 950 def rollback(self, dryrun=False, force=False):
951 951 wlock = lock = None
952 952 try:
953 953 wlock = self.wlock()
954 954 lock = self.lock()
955 955 if os.path.exists(self.sjoin("undo")):
956 956 return self._rollback(dryrun, force)
957 957 else:
958 958 self.ui.warn(_("no rollback information available\n"))
959 959 return 1
960 960 finally:
961 961 release(lock, wlock)
962 962
    def _rollback(self, dryrun, force):
        """Undo the last transaction recorded in the undo.* files.

        dryrun only reports what would happen; force skips the safety
        check against rolling back a commit while not checked out on
        tip. Returns 0 on success (including dry run).
        """
        ui = self.ui
        try:
            # undo.desc layout (see _writejournal): "<oldlen>\n<desc>\n[<detail>\n]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        # remember the working-dir parents before the store changes
        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        # only restore the old dirstate if a working-dir parent was stripped
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1031 1031
1032 1032 def invalidatecaches(self):
1033 1033 def delcache(name):
1034 1034 try:
1035 1035 delattr(self, name)
1036 1036 except AttributeError:
1037 1037 pass
1038 1038
1039 1039 delcache('_tagscache')
1040 1040
1041 1041 self._branchcache = None # in UTF-8
1042 1042 self._branchcachetip = None
1043 1043
1044 1044 def invalidatedirstate(self):
1045 1045 '''Invalidates the dirstate, causing the next call to dirstate
1046 1046 to check if it was modified since the last time it was read,
1047 1047 rereading it if it has.
1048 1048
1049 1049 This is different to dirstate.invalidate() that it doesn't always
1050 1050 rereads the dirstate. Use dirstate.invalidate() if you want to
1051 1051 explicitly read the dirstate again (i.e. restoring it to a previous
1052 1052 known good state).'''
1053 1053 if 'dirstate' in self.__dict__:
1054 1054 for k in self.dirstate._filecache:
1055 1055 try:
1056 1056 delattr(self.dirstate, k)
1057 1057 except AttributeError:
1058 1058 pass
1059 1059 delattr(self, 'dirstate')
1060 1060
1061 1061 def invalidate(self):
1062 1062 for k in self._filecache:
1063 1063 # dirstate is invalidated separately in invalidatedirstate()
1064 1064 if k == 'dirstate':
1065 1065 continue
1066 1066
1067 1067 try:
1068 1068 delattr(self, k)
1069 1069 except AttributeError:
1070 1070 pass
1071 1071 self.invalidatecaches()
1072 1072
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file at path lockname.

        With wait false, a held lock raises LockHeld immediately;
        otherwise we warn and retry with the ui.timeout setting
        (600 seconds by default). acquirefn runs after acquisition,
        releasefn on release; desc appears in user-facing messages.
        Returns the lock object.
        """
        try:
            # first try a non-blocking acquisition
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
1087 1087
1088 1088 def _afterlock(self, callback):
1089 1089 """add a callback to the current repository lock.
1090 1090
1091 1091 The callback will be executed on lock release."""
1092 1092 l = self._lockref and self._lockref()
1093 1093 if l:
1094 1094 l.postrelease.append(callback)
1095 1095 else:
1096 1096 callback()
1097 1097
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse an already-held lock (lock objects support nesting)
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush store and phase data, then refresh filecache entries
            # so their recorded stat info matches what was just written
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1120 1120
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse an already-held working-directory lock
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # write the dirstate on release and refresh its filecache
            # entry so the cached stat info stays accurate
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1141 1141
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        parent manifests; linkrev is the changelog revision the new
        filelog entry will link to. The filename is appended to
        changelist when content, copy metadata, or merge flags changed.
        Returns the new (or reused) file node.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1221 1221
1222 1222 def commit(self, text="", user=None, date=None, match=None, force=False,
1223 1223 editor=False, extra={}):
1224 1224 """Add a new revision to current repository.
1225 1225
1226 1226 Revision information is gathered from the working directory,
1227 1227 match can be used to filter the committed files. If editor is
1228 1228 supplied, it is called to get a commit message.
1229 1229 """
1230 1230
1231 1231 def fail(f, msg):
1232 1232 raise util.Abort('%s: %s' % (f, msg))
1233 1233
1234 1234 if not match:
1235 1235 match = matchmod.always(self.root, '')
1236 1236
1237 1237 if not force:
1238 1238 vdirs = []
1239 1239 match.dir = vdirs.append
1240 1240 match.bad = fail
1241 1241
1242 1242 wlock = self.wlock()
1243 1243 try:
1244 1244 wctx = self[None]
1245 1245 merge = len(wctx.parents()) > 1
1246 1246
1247 1247 if (not force and merge and match and
1248 1248 (match.files() or match.anypats())):
1249 1249 raise util.Abort(_('cannot partially commit a merge '
1250 1250 '(do not specify files or patterns)'))
1251 1251
1252 1252 changes = self.status(match=match, clean=force)
1253 1253 if force:
1254 1254 changes[0].extend(changes[6]) # mq may commit unchanged files
1255 1255
1256 1256 # check subrepos
1257 1257 subs = []
1258 1258 commitsubs = set()
1259 1259 newstate = wctx.substate.copy()
1260 1260 # only manage subrepos and .hgsubstate if .hgsub is present
1261 1261 if '.hgsub' in wctx:
1262 1262 # we'll decide whether to track this ourselves, thanks
1263 1263 if '.hgsubstate' in changes[0]:
1264 1264 changes[0].remove('.hgsubstate')
1265 1265 if '.hgsubstate' in changes[2]:
1266 1266 changes[2].remove('.hgsubstate')
1267 1267
1268 1268 # compare current state to last committed state
1269 1269 # build new substate based on last committed state
1270 1270 oldstate = wctx.p1().substate
1271 1271 for s in sorted(newstate.keys()):
1272 1272 if not match(s):
1273 1273 # ignore working copy, use old state if present
1274 1274 if s in oldstate:
1275 1275 newstate[s] = oldstate[s]
1276 1276 continue
1277 1277 if not force:
1278 1278 raise util.Abort(
1279 1279 _("commit with new subrepo %s excluded") % s)
1280 1280 if wctx.sub(s).dirty(True):
1281 1281 if not self.ui.configbool('ui', 'commitsubrepos'):
1282 1282 raise util.Abort(
1283 1283 _("uncommitted changes in subrepo %s") % s,
1284 1284 hint=_("use --subrepos for recursive commit"))
1285 1285 subs.append(s)
1286 1286 commitsubs.add(s)
1287 1287 else:
1288 1288 bs = wctx.sub(s).basestate()
1289 1289 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1290 1290 if oldstate.get(s, (None, None, None))[1] != bs:
1291 1291 subs.append(s)
1292 1292
1293 1293 # check for removed subrepos
1294 1294 for p in wctx.parents():
1295 1295 r = [s for s in p.substate if s not in newstate]
1296 1296 subs += [s for s in r if match(s)]
1297 1297 if subs:
1298 1298 if (not match('.hgsub') and
1299 1299 '.hgsub' in (wctx.modified() + wctx.added())):
1300 1300 raise util.Abort(
1301 1301 _("can't commit subrepos without .hgsub"))
1302 1302 changes[0].insert(0, '.hgsubstate')
1303 1303
1304 1304 elif '.hgsub' in changes[2]:
1305 1305 # clean up .hgsubstate when .hgsub is removed
1306 1306 if ('.hgsubstate' in wctx and
1307 1307 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1308 1308 changes[2].insert(0, '.hgsubstate')
1309 1309
1310 1310 # make sure all explicit patterns are matched
1311 1311 if not force and match.files():
1312 1312 matched = set(changes[0] + changes[1] + changes[2])
1313 1313
1314 1314 for f in match.files():
1315 1315 if f == '.' or f in matched or f in wctx.substate:
1316 1316 continue
1317 1317 if f in changes[3]: # missing
1318 1318 fail(f, _('file not found!'))
1319 1319 if f in vdirs: # visited directory
1320 1320 d = f + '/'
1321 1321 for mf in matched:
1322 1322 if mf.startswith(d):
1323 1323 break
1324 1324 else:
1325 1325 fail(f, _("no match under directory!"))
1326 1326 elif f not in self.dirstate:
1327 1327 fail(f, _("file not tracked!"))
1328 1328
1329 1329 if (not force and not extra.get("close") and not merge
1330 1330 and not (changes[0] or changes[1] or changes[2])
1331 1331 and wctx.branch() == wctx.p1().branch()):
1332 1332 return None
1333 1333
1334 1334 if merge and changes[3]:
1335 1335 raise util.Abort(_("cannot commit merge with missing files"))
1336 1336
1337 1337 ms = mergemod.mergestate(self)
1338 1338 for f in changes[0]:
1339 1339 if f in ms and ms[f] == 'u':
1340 1340 raise util.Abort(_("unresolved merge conflicts "
1341 1341 "(see hg help resolve)"))
1342 1342
1343 1343 cctx = context.workingctx(self, text, user, date, extra, changes)
1344 1344 if editor:
1345 1345 cctx._text = editor(self, cctx, subs)
1346 1346 edited = (text != cctx._text)
1347 1347
1348 1348 # commit subs and write new state
1349 1349 if subs:
1350 1350 for s in sorted(commitsubs):
1351 1351 sub = wctx.sub(s)
1352 1352 self.ui.status(_('committing subrepository %s\n') %
1353 1353 subrepo.subrelpath(sub))
1354 1354 sr = sub.commit(cctx._text, user, date)
1355 1355 newstate[s] = (newstate[s][0], sr)
1356 1356 subrepo.writestate(self, newstate)
1357 1357
1358 1358 # Save commit message in case this transaction gets rolled back
1359 1359 # (e.g. by a pretxncommit hook). Leave the content alone on
1360 1360 # the assumption that the user will use the same editor again.
1361 1361 msgfn = self.savecommitmessage(cctx._text)
1362 1362
1363 1363 p1, p2 = self.dirstate.parents()
1364 1364 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1365 1365 try:
1366 1366 self.hook("precommit", throw=True, parent1=hookp1,
1367 1367 parent2=hookp2)
1368 1368 ret = self.commitctx(cctx, True)
1369 1369 except: # re-raises
1370 1370 if edited:
1371 1371 self.ui.write(
1372 1372 _('note: commit message saved in %s\n') % msgfn)
1373 1373 raise
1374 1374
1375 1375 # update bookmarks, dirstate and mergestate
1376 1376 bookmarks.update(self, [p1, p2], ret)
1377 1377 for f in changes[0] + changes[1]:
1378 1378 self.dirstate.normal(f)
1379 1379 for f in changes[2]:
1380 1380 self.dirstate.drop(f)
1381 1381 self.dirstate.setparents(ret)
1382 1382 ms.reset()
1383 1383 finally:
1384 1384 wlock.release()
1385 1385
1386 1386 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1387 1387 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1388 1388 self._afterlock(commithook)
1389 1389 return ret
1390 1390
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With error true, an IOError while committing a file aborts the
        commit instead of treating the file as removed. Returns the new
        changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        # a missing file (ENOENT) is treated as removed
                        # unless the caller asked for strict errors
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in the proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase the result will be
                # compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1471 1471
    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchheadcache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerating it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it, Otherwise, since nodes were destroyed, the cache is stale and this
        # will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()
1508 1508
1509 1509 def walk(self, match, node=None):
1510 1510 '''
1511 1511 walk recursively through the directory tree or a given
1512 1512 changeset, finding all files matched by the match
1513 1513 function
1514 1514 '''
1515 1515 return self[node].walk(match)
1516 1516
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of seven sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            # 'cmp' lists files whose content must be compared to decide
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            # anything left in mf1 exists only in the first revision
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    # merge subrepo results into ours, prefixed by subpath
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1669 1669
1670 1670 def heads(self, start=None):
1671 1671 heads = self.changelog.heads(start)
1672 1672 # sort the output in rev descending order
1673 1673 return sorted(heads, key=self.changelog.rev, reverse=True)
1674 1674
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            # unknown branch name: no heads
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            # drop heads whose changeset closes the branch
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads
1697 1697
    def branches(self, nodes):
        """Follow first parents from each node back to a merge or root.

        For each node (defaulting to the tip), append a tuple
        (start-node, end-node, p1, p2) where the walk down first
        parents stops at the first changeset that is a merge
        (p2 != nullid) or a root (p1 == nullid).
        """
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n  # remember the node this segment started from
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1711 1711
    def between(self, pairs):
        """Sample the first-parent chain between each (top, bottom) pair.

        Walking from top towards bottom, collect the nodes found at
        power-of-two distances (1, 2, 4, ...) from top, stopping at
        bottom or the null node. Returns one list of sampled nodes per
        input pair.
        """
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1  # next step count at which to record a node

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
1730 1730
1731 1731 def pull(self, remote, heads=None, force=False):
1732 1732 # don't open transaction for nothing or you break future useful
1733 1733 # rollback call
1734 1734 tr = None
1735 1735 trname = 'pull\n' + util.hidepassword(remote.url())
1736 1736 lock = self.lock()
1737 1737 try:
1738 1738 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1739 1739 force=force)
1740 1740 common, fetch, rheads = tmp
1741 1741 if not fetch:
1742 1742 self.ui.status(_("no changes found\n"))
1743 1743 added = []
1744 1744 result = 0
1745 1745 else:
1746 1746 tr = self.transaction(trname)
1747 1747 if heads is None and list(common) == [nullid]:
1748 1748 self.ui.status(_("requesting all changes\n"))
1749 1749 elif heads is None and remote.capable('changegroupsubset'):
1750 1750 # issue1320, avoid a race if remote changed after discovery
1751 1751 heads = rheads
1752 1752
1753 1753 if remote.capable('getbundle'):
1754 1754 cg = remote.getbundle('pull', common=common,
1755 1755 heads=heads or rheads)
1756 1756 elif heads is None:
1757 1757 cg = remote.changegroup(fetch, 'pull')
1758 1758 elif not remote.capable('changegroupsubset'):
1759 1759 raise util.Abort(_("partial pull cannot be done because "
1760 1760 "other repository doesn't support "
1761 1761 "changegroupsubset."))
1762 1762 else:
1763 1763 cg = remote.changegroupsubset(fetch, heads, 'pull')
1764 1764 clstart = len(self.changelog)
1765 1765 result = self.addchangegroup(cg, 'pull', remote.url())
1766 1766 clend = len(self.changelog)
1767 1767 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1768 1768
1769 1769 # compute target subset
1770 1770 if heads is None:
1771 1771 # We pulled every thing possible
1772 1772 # sync on everything common
1773 1773 subset = common + added
1774 1774 else:
1775 1775 # We pulled a specific subset
1776 1776 # sync on this subset
1777 1777 subset = heads
1778 1778
1779 1779 # Get remote phases data from remote
1780 1780 remotephases = remote.listkeys('phases')
1781 1781 publishing = bool(remotephases.get('publishing', False))
1782 1782 if remotephases and not publishing:
1783 1783 # remote is new and unpublishing
1784 1784 pheads, _dr = phases.analyzeremotephases(self, subset,
1785 1785 remotephases)
1786 1786 phases.advanceboundary(self, phases.public, pheads)
1787 1787 phases.advanceboundary(self, phases.draft, subset)
1788 1788 else:
1789 1789 # Remote is old or publishing all common changesets
1790 1790 # should be seen as public
1791 1791 phases.advanceboundary(self, phases.public, subset)
1792 1792
1793 1793 if obsolete._enabled:
1794 1794 self.ui.debug('fetching remote obsolete markers')
1795 1795 remoteobs = remote.listkeys('obsolete')
1796 1796 if 'dump0' in remoteobs:
1797 1797 if tr is None:
1798 1798 tr = self.transaction(trname)
1799 1799 for key in sorted(remoteobs, reverse=True):
1800 1800 if key.startswith('dump'):
1801 1801 data = base85.b85decode(remoteobs[key])
1802 1802 self.obsstore.mergemarkers(tr, data)
1803 1803 if tr is not None:
1804 1804 tr.close()
1805 1805 finally:
1806 1806 if tr is not None:
1807 1807 tr.release()
1808 1808 lock.release()
1809 1809
1810 1810 return result
1811 1811
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.

        force is the --force flag of push; revs is the (possibly None)
        list of revisions being pushed.  The default implementation
        performs no checks.
        """
        pass
1818 1818
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                # remote cannot be locked through the wire protocol;
                # addchangegroup requires us to hold its lock directly
                lock = remote.lock()
            try:
                # discovery: figure out what we have in common and what
                # would be transferred
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, self, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # if self.obsstore == False --> no obsolete
                        # then, save the iteration
                        if self.obsstore:
                            # these messages are defined here to stay within
                            # the 80-char limit
                            mso = _("push includes an obsolete changeset: %s!")
                            msu = _("push includes an unstable changeset: %s!")
                            # If we are to push if there is at least one
                            # obsolete or unstable changeset in missing, at
                            # least one of the missinghead will be obsolete or
                            # unstable. So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = self[node]
                                if ctx.obsolete():
                                    # NOTE(review): mso/msu are already
                                    # translated; _(mso) re-translates the
                                    # translated string - confirm intent
                                    raise util.Abort(_(mso) % ctx)
                                elif ctx.unstable():
                                    raise util.Abort(_(msu) % ctx)
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                # cheads = heads of the changesets common to both sides
                # after the push, used to synchronize phases
                if ret:
                    # push succeed, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All out push fails. synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        # finally, propagate bookmarks the remote already knows about
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    # only fast-forward the remote bookmark
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
2000 2000
2001 2001 def changegroupinfo(self, nodes, source):
2002 2002 if self.ui.verbose or source == 'bundle':
2003 2003 self.ui.status(_("%d changesets found\n") % len(nodes))
2004 2004 if self.ui.debugflag:
2005 2005 self.ui.debug("list of changesets:\n")
2006 2006 for node in nodes:
2007 2007 self.ui.debug("%s\n" % hex(node))
2008 2008
2009 2009 def changegroupsubset(self, bases, heads, source):
2010 2010 """Compute a changegroup consisting of all the nodes that are
2011 2011 descendants of any of the bases and ancestors of any of the heads.
2012 2012 Return a chunkbuffer object whose read() method will return
2013 2013 successive changegroup chunks.
2014 2014
2015 2015 It is fairly complex as determining which filenodes and which
2016 2016 manifest nodes need to be included for the changeset to be complete
2017 2017 is non-trivial.
2018 2018
2019 2019 Another wrinkle is doing the reverse, figuring out which changeset in
2020 2020 the changegroup a particular filenode or manifestnode belongs to.
2021 2021 """
2022 2022 cl = self.changelog
2023 2023 if not bases:
2024 2024 bases = [nullid]
2025 2025 csets, bases, heads = cl.nodesbetween(bases, heads)
2026 2026 # We assume that all ancestors of bases are known
2027 2027 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2028 2028 return self._changegroupsubset(common, csets, heads, source)
2029 2029
2030 2030 def getlocalbundle(self, source, outgoing):
2031 2031 """Like getbundle, but taking a discovery.outgoing as an argument.
2032 2032
2033 2033 This is only implemented for local repos and reuses potentially
2034 2034 precomputed sets in outgoing."""
2035 2035 if not outgoing.missing:
2036 2036 return None
2037 2037 return self._changegroupsubset(outgoing.common,
2038 2038 outgoing.missing,
2039 2039 outgoing.missingheads,
2040 2040 source)
2041 2041
2042 2042 def getbundle(self, source, heads=None, common=None):
2043 2043 """Like changegroupsubset, but returns the set difference between the
2044 2044 ancestors of heads and the ancestors common.
2045 2045
2046 2046 If heads is None, use the local heads. If common is None, use [nullid].
2047 2047
2048 2048 The nodes in common might not all be known locally due to the way the
2049 2049 current discovery protocol works.
2050 2050 """
2051 2051 cl = self.changelog
2052 2052 if common:
2053 2053 nm = cl.nodemap
2054 2054 common = [n for n in common if n in nm]
2055 2055 else:
2056 2056 common = [nullid]
2057 2057 if not heads:
2058 2058 heads = cl.heads()
2059 2059 return self.getlocalbundle(source,
2060 2060 discovery.outgoing(cl, common, heads))
2061 2061
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup for csets, excluding everything whose
        linkrev is in commonrevs (i.e. already known to the recipient).

        commonrevs: set of changelog revs the recipient already has
        csets:      changeset nodes to send
        heads:      heads of the outgoing set (sorted in place here)
        source:     operation name for hooks/progress ('push', 'pull', ...)
        Returns an unbundle10 object streaming the group.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        # mutable state shared with the lookup callback below:
        # fstate = [current filename, {filenode -> changelog node}]
        fstate = ['', {}]
        # count = [items done, items total] for progress reporting
        count = [0, 0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        # callback used by the bundler for every node: maps a node to the
        # changelog node that introduced it, and as a side effect records
        # which manifests/filenodes still need to be sent
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2168 2168
    def changegroup(self, basenodes, source):
        """Return a changegroup of every local changeset descending from
        basenodes, computed against the current local heads."""
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)
2172 2172
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {} # changelog nodes keyed by manifest node
        changedfiles = set()
        # fstate[0] is the filename currently being bundled (shared with
        # the lookup callback); count is [done, total] for progress
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        # NB: this local deliberately shadows the module-level 'revset'
        # import within this method
        revset = set([cl.rev(n) for n in nodes])

        # all nodes of a revlog whose linkrev points into the outgoing set
        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        # callback used by the bundler: maps a node to the changelog node
        # that introduced it, updating progress as a side effect
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2265 2265
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # small helper object reporting per-chunk progress
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            # efiles becomes the count of distinct files touched
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # any filenode still listed in needfiles was promised by a
            # manifest but never delivered - abort
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # dh = net change in head count (branch-closing heads excluded)
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
2456 2456
    def stream_in(self, remote, requirements):
        """Clone by streaming raw store files from *remote*.

        remote is a peer supporting the 'stream_out' command;
        requirements is a set of format requirements, updated in place
        with the non-format requirements of this repo and then applied.
        Returns len(self.heads()) + 1 (mirroring addchangegroup()'s
        "heads unchanged" convention).  Raises util.Abort on a server
        refusal and error.ResponseError on a malformed stream.
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            # first line: integer status code
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # second line: "<file count> <total bytes>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                # per-file header: "<name>\0<size>"
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            # avoid a division by zero in the transfer-rate report below
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2524 2524
2525 2525 def clone(self, remote, heads=[], stream=False):
2526 2526 '''clone remote repository.
2527 2527
2528 2528 keyword arguments:
2529 2529 heads: list of revs to clone (forces use of pull)
2530 2530 stream: use streaming clone if possible'''
2531 2531
2532 2532 # now, all clients that can request uncompressed clones can
2533 2533 # read repo formats supported by all servers that can serve
2534 2534 # them.
2535 2535
2536 2536 # if revlog format changes, client will have to check version
2537 2537 # and format flags on "stream" capability, and use
2538 2538 # uncompressed only if compatible.
2539 2539
2540 2540 if not stream:
2541 2541 # if the server explicitly prefer to stream (for fast LANs)
2542 2542 stream = remote.capable('stream-preferred')
2543 2543
2544 2544 if stream and not heads:
2545 2545 # 'stream' means remote revlog format is revlogv1 only
2546 2546 if remote.capable('stream'):
2547 2547 return self.stream_in(remote, set(('revlogv1',)))
2548 2548 # otherwise, 'streamreqs' contains the remote revlog format
2549 2549 streamreqs = remote.capable('streamreqs')
2550 2550 if streamreqs:
2551 2551 streamreqs = set(streamreqs.split(','))
2552 2552 # if we support it, stream in and adjust our requirements
2553 2553 if not streamreqs - self.supportedformats:
2554 2554 return self.stream_in(remote, streamreqs)
2555 2555 return self.pull(remote, heads)
2556 2556
2557 2557 def pushkey(self, namespace, key, old, new):
2558 2558 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2559 2559 old=old, new=new)
2560 2560 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2561 2561 ret = pushkey.push(self, namespace, key, old, new)
2562 2562 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2563 2563 ret=ret)
2564 2564 return ret
2565 2565
2566 2566 def listkeys(self, namespace):
2567 2567 self.hook('prelistkeys', throw=True, namespace=namespace)
2568 2568 self.ui.debug('listing keys for "%s"\n' % namespace)
2569 2569 values = pushkey.list(self, namespace)
2570 2570 self.hook('listkeys', namespace=namespace, values=values)
2571 2571 return values
2572 2572
2573 2573 def debugwireargs(self, one, two, three=None, four=None, five=None):
2574 2574 '''used to test argument passing over the wire'''
2575 2575 return "%s %s %s %s %s" % (one, two, three, four, five)
2576 2576
2577 2577 def savecommitmessage(self, text):
2578 2578 fp = self.opener('last-message.txt', 'wb')
2579 2579 try:
2580 2580 fp.write(text)
2581 2581 finally:
2582 2582 fp.close()
2583 2583 return self.pathto(fp.name[len(self.root)+1:])
2584 2584
# defined at module level (not as a method) to avoid circular references
# so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair in *files*.

    A pair whose source does not exist is skipped silently - the journal
    file may not have been created yet.
    """
    pending = [tuple(entry) for entry in files]
    def renameall():
        for src, dest in pending:
            try:
                util.rename(src, dest)
            except OSError:
                # journal file does not yet exist
                pass
    return renameall
2595 2595
def undoname(fn):
    """Map a journal file path to its corresponding undo file path.

    Only the first occurrence of 'journal' in the basename is replaced,
    so filenames containing 'journal' elsewhere are left intact.
    """
    dirname, basename = os.path.split(fn)
    assert basename.startswith('journal')
    return os.path.join(dirname, basename.replace('journal', 'undo', 1))
2600 2600
def instance(ui, path, create):
    """Repository factory used by the generic repo opener.

    path may be a plain filesystem path or a file:// URL; it is
    normalized with util.urllocalpath() before constructing the repo.
    """
    return localrepository(ui, util.urllocalpath(path), create)
2603 2603
def islocal(path):
    """Companion of instance(): repositories of this module are always
    local, regardless of *path*."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now