pushkey: splits obsolete marker exchange into multiple keys...
Pierre-Yves David
r17295:1f08ecc7 stable
@@ -1,2590 +1,2597 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
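The propertycache and filecache/storecache decorators used throughout this class cache a computed attribute on the repository object and, in the filecache case, tie its validity to a file on disk. A minimal standalone sketch of that caching pattern, using the hypothetical names cachedproperty and mtimecache rather than Mercurial's actual implementations:

# Illustrative only: not Mercurial's util.propertycache/scmutil.filecache.
import os

class cachedproperty(object):
    """Compute the value once, then store it on the instance."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        value = self.func(obj)
        obj.__dict__[self.name] = value  # later lookups bypass the descriptor
        return value

class mtimecache(object):
    """Reload a file-backed value only when the file's mtime/size change."""
    def __init__(self, path, loader):
        self.path = path
        self.loader = loader
        self._stamp = None
        self._value = None
    def get(self):
        try:
            st = os.stat(self.path)
            stamp = (st.st_mtime, st.st_size)
        except OSError:
            stamp = None
        if stamp != self._stamp:
            self._stamp = stamp
            self._value = self.loader(self.path)
        return self._value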
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 49 def canpush(self):
50 50 return True
51 51
52 52 def url(self):
53 53 return self._repo.url()
54 54
55 55 def lookup(self, key):
56 56 return self._repo.lookup(key)
57 57
58 58 def branchmap(self):
59 59 return discovery.visiblebranchmap(self._repo)
60 60
61 61 def heads(self):
62 62 return discovery.visibleheads(self._repo)
63 63
64 64 def known(self, nodes):
65 65 return self._repo.known(nodes)
66 66
67 67 def getbundle(self, source, heads=None, common=None):
68 68 return self._repo.getbundle(source, heads=heads, common=common)
69 69
70 70 # TODO We might want to move the next two calls into legacypeer and add
71 71 # unbundle instead.
72 72
73 73 def lock(self):
74 74 return self._repo.lock()
75 75
76 76 def addchangegroup(self, cg, source, url):
77 77 return self._repo.addchangegroup(cg, source, url)
78 78
79 79 def pushkey(self, namespace, key, old, new):
80 80 return self._repo.pushkey(namespace, key, old, new)
81 81
82 82 def listkeys(self, namespace):
83 83 return self._repo.listkeys(namespace)
84 84
85 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 86 '''used to test argument passing over the wire'''
87 87 return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
89 89 class locallegacypeer(localpeer):
90 90 '''peer extension which implements legacy methods too; used for tests with
91 91 restricted capabilities'''
92 92
93 93 def __init__(self, repo):
94 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95 95
96 96 def branches(self, nodes):
97 97 return self._repo.branches(nodes)
98 98
99 99 def between(self, pairs):
100 100 return self._repo.between(pairs)
101 101
102 102 def changegroup(self, basenodes, source):
103 103 return self._repo.changegroup(basenodes, source)
104 104
105 105 def changegroupsubset(self, bases, heads, source):
106 106 return self._repo.changegroupsubset(bases, heads, source)
107 107
108 108 class localrepository(object):
109 109
110 110 supportedformats = set(('revlogv1', 'generaldelta'))
111 111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 112 'dotencode'))
113 113 openerreqs = set(('revlogv1', 'generaldelta'))
114 114 requirements = ['revlogv1']
115 115
116 116 def _baserequirements(self, create):
117 117 return self.requirements[:]
118 118
119 119 def __init__(self, baseui, path=None, create=False):
120 120 self.wopener = scmutil.opener(path, expand=True)
121 121 self.wvfs = self.wopener
122 122 self.root = self.wvfs.base
123 123 self.path = self.wvfs.join(".hg")
124 124 self.origroot = path
125 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 126 self.opener = scmutil.opener(self.path)
127 127 self.vfs = self.opener
128 128 self.baseui = baseui
129 129 self.ui = baseui.copy()
130 130 # A list of callbacks to shape the phase if no data were found.
131 131 # Callbacks are of the form: func(repo, roots) --> processed root.
132 132 # This list is to be filled by extensions during repo setup.
133 133 self._phasedefaults = []
134 134 try:
135 135 self.ui.readconfig(self.join("hgrc"), self.root)
136 136 extensions.loadall(self.ui)
137 137 except IOError:
138 138 pass
139 139
140 140 if not self.vfs.isdir():
141 141 if create:
142 142 if not self.wvfs.exists():
143 143 self.wvfs.makedirs()
144 144 self.vfs.makedir(notindexed=True)
145 145 requirements = self._baserequirements(create)
146 146 if self.ui.configbool('format', 'usestore', True):
147 147 self.vfs.mkdir("store")
148 148 requirements.append("store")
149 149 if self.ui.configbool('format', 'usefncache', True):
150 150 requirements.append("fncache")
151 151 if self.ui.configbool('format', 'dotencode', True):
152 152 requirements.append('dotencode')
153 153 # create an invalid changelog
154 154 self.vfs.append(
155 155 "00changelog.i",
156 156 '\0\0\0\2' # represents revlogv2
157 157 ' dummy changelog to prevent using the old repo layout'
158 158 )
159 159 if self.ui.configbool('format', 'generaldelta', False):
160 160 requirements.append("generaldelta")
161 161 requirements = set(requirements)
162 162 else:
163 163 raise error.RepoError(_("repository %s not found") % path)
164 164 elif create:
165 165 raise error.RepoError(_("repository %s already exists") % path)
166 166 else:
167 167 try:
168 168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 169 except IOError, inst:
170 170 if inst.errno != errno.ENOENT:
171 171 raise
172 172 requirements = set()
173 173
174 174 self.sharedpath = self.path
175 175 try:
176 176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 177 if not os.path.exists(s):
178 178 raise error.RepoError(
179 179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 180 self.sharedpath = s
181 181 except IOError, inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184
185 185 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
186 186 self.spath = self.store.path
187 187 self.sopener = self.store.opener
188 188 self.svfs = self.sopener
189 189 self.sjoin = self.store.join
190 190 self.opener.createmode = self.store.createmode
191 191 self._applyrequirements(requirements)
192 192 if create:
193 193 self._writerequirements()
194 194
195 195
196 196 self._branchcache = None
197 197 self._branchcachetip = None
198 198 self.filterpats = {}
199 199 self._datafilters = {}
200 200 self._transref = self._lockref = self._wlockref = None
201 201
202 202 # A cache for various files under .hg/ that tracks file changes,
203 203 # (used by the filecache decorator)
204 204 #
205 205 # Maps a property name to its util.filecacheentry
206 206 self._filecache = {}
207 207
208 208 def close(self):
209 209 pass
210 210
211 211 def _restrictcapabilities(self, caps):
212 212 return caps
213 213
214 214 def _applyrequirements(self, requirements):
215 215 self.requirements = requirements
216 216 self.sopener.options = dict((r, 1) for r in requirements
217 217 if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
224 224
225 225 def _checknested(self, path):
226 226 """Determine if path is a legal nested repository."""
227 227 if not path.startswith(self.root):
228 228 return False
229 229 subpath = path[len(self.root) + 1:]
230 230 normsubpath = util.pconvert(subpath)
231 231
232 232 # XXX: Checking against the current working copy is wrong in
233 233 # the sense that it can reject things like
234 234 #
235 235 # $ hg cat -r 10 sub/x.txt
236 236 #
237 237 # if sub/ is no longer a subrepository in the working copy
238 238 # parent revision.
239 239 #
240 240 # However, it can of course also allow things that would have
241 241 # been rejected before, such as the above cat command if sub/
242 242 # is a subrepository now, but was a normal directory before.
243 243 # The old path auditor would have rejected it by mistake since it
244 244 # panics when it sees sub/.hg/.
245 245 #
246 246 # All in all, checking against the working copy seems sensible
247 247 # since we want to prevent access to nested repositories on
248 248 # the filesystem *now*.
249 249 ctx = self[None]
250 250 parts = util.splitpath(subpath)
251 251 while parts:
252 252 prefix = '/'.join(parts)
253 253 if prefix in ctx.substate:
254 254 if prefix == normsubpath:
255 255 return True
256 256 else:
257 257 sub = ctx.sub(prefix)
258 258 return sub.checknested(subpath[len(prefix) + 1:])
259 259 else:
260 260 parts.pop()
261 261 return False
262 262
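_checknested walks the candidate path from its deepest prefix upward until it finds a prefix that is a known subrepository, then delegates the remainder of the path to that subrepo. A standalone sketch of the prefix walk, with a hypothetical find_enclosing_subrepo helper standing in for the ctx.substate lookup:

# Illustrative only: simplified version of the prefix walk in _checknested.
def find_enclosing_subrepo(relpath, substate):
    """Return (subrepo prefix, path inside it) or (None, relpath)."""
    parts = relpath.split('/')
    while parts:
        prefix = '/'.join(parts)
        if prefix in substate:
            return prefix, relpath[len(prefix) + 1:]
        parts.pop()
    return None, relpath

# example: find_enclosing_subrepo('sub/deep/x.txt', {'sub': None})
# returns ('sub', 'deep/x.txt')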
263 263 def peer(self):
264 264 return localpeer(self) # not cached to avoid reference cycle
265 265
266 266 @filecache('bookmarks')
267 267 def _bookmarks(self):
268 268 return bookmarks.read(self)
269 269
270 270 @filecache('bookmarks.current')
271 271 def _bookmarkcurrent(self):
272 272 return bookmarks.readcurrent(self)
273 273
274 274 def _writebookmarks(self, marks):
275 275 bookmarks.write(self)
276 276
277 277 def bookmarkheads(self, bookmark):
278 278 name = bookmark.split('@', 1)[0]
279 279 heads = []
280 280 for mark, n in self._bookmarks.iteritems():
281 281 if mark.split('@', 1)[0] == name:
282 282 heads.append(n)
283 283 return heads
284 284
285 285 @storecache('phaseroots')
286 286 def _phasecache(self):
287 287 return phases.phasecache(self, self._phasedefaults)
288 288
289 289 @storecache('obsstore')
290 290 def obsstore(self):
291 291 store = obsolete.obsstore(self.sopener)
292 292 return store
293 293
294 294 @propertycache
295 295 def hiddenrevs(self):
296 296 """hiddenrevs: revs that should be hidden by commands and tools
297 297
298 298 This set is carried on the repo to ease initialisation and lazy
299 299 loading; it'll probably move back to changelog for efficiency and
300 300 consistency reasons.
301 301
302 302 Note that the hiddenrevs set will need invalidation when:
303 303 - a new changeset is added (possibly unstable above extinct)
304 304 - a new obsolete marker is added (possibly a new extinct changeset)
305 305 """
306 306 hidden = set()
307 307 if self.obsstore:
308 308 ### hide extinct changesets that are not accessible by any means
309 309 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
310 310 hidden.update(self.revs(hiddenquery))
311 311 return hidden
312 312
313 313 @storecache('00changelog.i')
314 314 def changelog(self):
315 315 c = changelog.changelog(self.sopener)
316 316 if 'HG_PENDING' in os.environ:
317 317 p = os.environ['HG_PENDING']
318 318 if p.startswith(self.root):
319 319 c.readpending('00changelog.i.a')
320 320 return c
321 321
322 322 @storecache('00manifest.i')
323 323 def manifest(self):
324 324 return manifest.manifest(self.sopener)
325 325
326 326 @filecache('dirstate')
327 327 def dirstate(self):
328 328 warned = [0]
329 329 def validate(node):
330 330 try:
331 331 self.changelog.rev(node)
332 332 return node
333 333 except error.LookupError:
334 334 if not warned[0]:
335 335 warned[0] = True
336 336 self.ui.warn(_("warning: ignoring unknown"
337 337 " working parent %s!\n") % short(node))
338 338 return nullid
339 339
340 340 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
341 341
342 342 def __getitem__(self, changeid):
343 343 if changeid is None:
344 344 return context.workingctx(self)
345 345 return context.changectx(self, changeid)
346 346
347 347 def __contains__(self, changeid):
348 348 try:
349 349 return bool(self.lookup(changeid))
350 350 except error.RepoLookupError:
351 351 return False
352 352
353 353 def __nonzero__(self):
354 354 return True
355 355
356 356 def __len__(self):
357 357 return len(self.changelog)
358 358
359 359 def __iter__(self):
360 360 for i in xrange(len(self)):
361 361 yield i
362 362
363 363 def revs(self, expr, *args):
364 364 '''Return a list of revisions matching the given revset'''
365 365 expr = revset.formatspec(expr, *args)
366 366 m = revset.match(None, expr)
367 367 return [r for r in m(self, range(len(self)))]
368 368
369 369 def set(self, expr, *args):
370 370 '''
371 371 Yield a context for each matching revision, after doing arg
372 372 replacement via revset.formatspec
373 373 '''
374 374 for r in self.revs(expr, *args):
375 375 yield self[r]
376 376
377 377 def url(self):
378 378 return 'file:' + self.root
379 379
380 380 def hook(self, name, throw=False, **args):
381 381 return hook.hook(self.ui, self, name, throw, **args)
382 382
383 383 tag_disallowed = ':\r\n'
384 384
385 385 def _tag(self, names, node, message, local, user, date, extra={}):
386 386 if isinstance(names, str):
387 387 allchars = names
388 388 names = (names,)
389 389 else:
390 390 allchars = ''.join(names)
391 391 for c in self.tag_disallowed:
392 392 if c in allchars:
393 393 raise util.Abort(_('%r cannot be used in a tag name') % c)
394 394
395 395 branches = self.branchmap()
396 396 for name in names:
397 397 self.hook('pretag', throw=True, node=hex(node), tag=name,
398 398 local=local)
399 399 if name in branches:
400 400 self.ui.warn(_("warning: tag %s conflicts with existing"
401 401 " branch name\n") % name)
402 402
403 403 def writetags(fp, names, munge, prevtags):
404 404 fp.seek(0, 2)
405 405 if prevtags and prevtags[-1] != '\n':
406 406 fp.write('\n')
407 407 for name in names:
408 408 m = munge and munge(name) or name
409 409 if (self._tagscache.tagtypes and
410 410 name in self._tagscache.tagtypes):
411 411 old = self.tags().get(name, nullid)
412 412 fp.write('%s %s\n' % (hex(old), m))
413 413 fp.write('%s %s\n' % (hex(node), m))
414 414 fp.close()
415 415
416 416 prevtags = ''
417 417 if local:
418 418 try:
419 419 fp = self.opener('localtags', 'r+')
420 420 except IOError:
421 421 fp = self.opener('localtags', 'a')
422 422 else:
423 423 prevtags = fp.read()
424 424
425 425 # local tags are stored in the current charset
426 426 writetags(fp, names, None, prevtags)
427 427 for name in names:
428 428 self.hook('tag', node=hex(node), tag=name, local=local)
429 429 return
430 430
431 431 try:
432 432 fp = self.wfile('.hgtags', 'rb+')
433 433 except IOError, e:
434 434 if e.errno != errno.ENOENT:
435 435 raise
436 436 fp = self.wfile('.hgtags', 'ab')
437 437 else:
438 438 prevtags = fp.read()
439 439
440 440 # committed tags are stored in UTF-8
441 441 writetags(fp, names, encoding.fromlocal, prevtags)
442 442
443 443 fp.close()
444 444
445 445 self.invalidatecaches()
446 446
447 447 if '.hgtags' not in self.dirstate:
448 448 self[None].add(['.hgtags'])
449 449
450 450 m = matchmod.exact(self.root, '', ['.hgtags'])
451 451 tagnode = self.commit(message, user, date, extra=extra, match=m)
452 452
453 453 for name in names:
454 454 self.hook('tag', node=hex(node), tag=name, local=local)
455 455
456 456 return tagnode
457 457
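writetags above appends one "<hex node> <tag name>" line per tag to .hgtags (or .hg/localtags), writing the tag's previous node first when the tag already exists so that tag history is preserved in the file. A standalone sketch of that append logic, using a hypothetical appendtags helper:

# Illustrative only: mirrors the line format written by writetags above.
def appendtags(lines, existing, nodehex, names):
    """existing maps tag name -> current hex node; lines is appended to."""
    for name in names:
        if name in existing:
            lines.append('%s %s' % (existing[name], name))  # keep old binding
        lines.append('%s %s' % (nodehex, name))
    return lines

# example: appendtags([], {'v1.0': '0' * 40}, 'f' * 40, ['v1.0'])
# -> ['000...000 v1.0', 'fff...fff v1.0']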
458 458 def tag(self, names, node, message, local, user, date):
459 459 '''tag a revision with one or more symbolic names.
460 460
461 461 names is a list of strings or, when adding a single tag, names may be a
462 462 string.
463 463
464 464 if local is True, the tags are stored in a per-repository file.
465 465 otherwise, they are stored in the .hgtags file, and a new
466 466 changeset is committed with the change.
467 467
468 468 keyword arguments:
469 469
470 470 local: whether to store tags in non-version-controlled file
471 471 (default False)
472 472
473 473 message: commit message to use if committing
474 474
475 475 user: name of user to use if committing
476 476
477 477 date: date tuple to use if committing'''
478 478
479 479 if not local:
480 480 for x in self.status()[:5]:
481 481 if '.hgtags' in x:
482 482 raise util.Abort(_('working copy of .hgtags is changed '
483 483 '(please commit .hgtags manually)'))
484 484
485 485 self.tags() # instantiate the cache
486 486 self._tag(names, node, message, local, user, date)
487 487
488 488 @propertycache
489 489 def _tagscache(self):
490 490 '''Returns a tagscache object that contains various tags related
491 491 caches.'''
492 492
493 493 # This simplifies its cache management by having one decorated
494 494 # function (this one) and the rest simply fetch things from it.
495 495 class tagscache(object):
496 496 def __init__(self):
497 497 # These two define the set of tags for this repository. tags
498 498 # maps tag name to node; tagtypes maps tag name to 'global' or
499 499 # 'local'. (Global tags are defined by .hgtags across all
500 500 # heads, and local tags are defined in .hg/localtags.)
501 501 # They constitute the in-memory cache of tags.
502 502 self.tags = self.tagtypes = None
503 503
504 504 self.nodetagscache = self.tagslist = None
505 505
506 506 cache = tagscache()
507 507 cache.tags, cache.tagtypes = self._findtags()
508 508
509 509 return cache
510 510
511 511 def tags(self):
512 512 '''return a mapping of tag to node'''
513 513 t = {}
514 514 for k, v in self._tagscache.tags.iteritems():
515 515 try:
516 516 # ignore tags to unknown nodes
517 517 self.changelog.rev(v)
518 518 t[k] = v
519 519 except (error.LookupError, ValueError):
520 520 pass
521 521 return t
522 522
523 523 def _findtags(self):
524 524 '''Do the hard work of finding tags. Return a pair of dicts
525 525 (tags, tagtypes) where tags maps tag name to node, and tagtypes
526 526 maps tag name to a string like \'global\' or \'local\'.
527 527 Subclasses or extensions are free to add their own tags, but
528 528 should be aware that the returned dicts will be retained for the
529 529 duration of the localrepo object.'''
530 530
531 531 # XXX what tagtype should subclasses/extensions use? Currently
532 532 # mq and bookmarks add tags, but do not set the tagtype at all.
533 533 # Should each extension invent its own tag type? Should there
534 534 # be one tagtype for all such "virtual" tags? Or is the status
535 535 # quo fine?
536 536
537 537 alltags = {} # map tag name to (node, hist)
538 538 tagtypes = {}
539 539
540 540 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
541 541 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
542 542
543 543 # Build the return dicts. Have to re-encode tag names because
544 544 # the tags module always uses UTF-8 (in order not to lose info
545 545 # writing to the cache), but the rest of Mercurial wants them in
546 546 # local encoding.
547 547 tags = {}
548 548 for (name, (node, hist)) in alltags.iteritems():
549 549 if node != nullid:
550 550 tags[encoding.tolocal(name)] = node
551 551 tags['tip'] = self.changelog.tip()
552 552 tagtypes = dict([(encoding.tolocal(name), value)
553 553 for (name, value) in tagtypes.iteritems()])
554 554 return (tags, tagtypes)
555 555
556 556 def tagtype(self, tagname):
557 557 '''
558 558 return the type of the given tag. result can be:
559 559
560 560 'local' : a local tag
561 561 'global' : a global tag
562 562 None : tag does not exist
563 563 '''
564 564
565 565 return self._tagscache.tagtypes.get(tagname)
566 566
567 567 def tagslist(self):
568 568 '''return a list of tags ordered by revision'''
569 569 if not self._tagscache.tagslist:
570 570 l = []
571 571 for t, n in self.tags().iteritems():
572 572 r = self.changelog.rev(n)
573 573 l.append((r, t, n))
574 574 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
575 575
576 576 return self._tagscache.tagslist
577 577
578 578 def nodetags(self, node):
579 579 '''return the tags associated with a node'''
580 580 if not self._tagscache.nodetagscache:
581 581 nodetagscache = {}
582 582 for t, n in self._tagscache.tags.iteritems():
583 583 nodetagscache.setdefault(n, []).append(t)
584 584 for tags in nodetagscache.itervalues():
585 585 tags.sort()
586 586 self._tagscache.nodetagscache = nodetagscache
587 587 return self._tagscache.nodetagscache.get(node, [])
588 588
589 589 def nodebookmarks(self, node):
590 590 marks = []
591 591 for bookmark, n in self._bookmarks.iteritems():
592 592 if n == node:
593 593 marks.append(bookmark)
594 594 return sorted(marks)
595 595
596 596 def _branchtags(self, partial, lrev):
597 597 # TODO: rename this function?
598 598 tiprev = len(self) - 1
599 599 if lrev != tiprev:
600 600 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
601 601 self._updatebranchcache(partial, ctxgen)
602 602 self._writebranchcache(partial, self.changelog.tip(), tiprev)
603 603
604 604 return partial
605 605
606 606 def updatebranchcache(self):
607 607 tip = self.changelog.tip()
608 608 if self._branchcache is not None and self._branchcachetip == tip:
609 609 return
610 610
611 611 oldtip = self._branchcachetip
612 612 self._branchcachetip = tip
613 613 if oldtip is None or oldtip not in self.changelog.nodemap:
614 614 partial, last, lrev = self._readbranchcache()
615 615 else:
616 616 lrev = self.changelog.rev(oldtip)
617 617 partial = self._branchcache
618 618
619 619 self._branchtags(partial, lrev)
620 620 # this private cache holds all heads (not just the branch tips)
621 621 self._branchcache = partial
622 622
623 623 def branchmap(self):
624 624 '''returns a dictionary {branch: [branchheads]}'''
625 625 self.updatebranchcache()
626 626 return self._branchcache
627 627
628 628 def _branchtip(self, heads):
629 629 '''return the tipmost branch head in heads'''
630 630 tip = heads[-1]
631 631 for h in reversed(heads):
632 632 if not self[h].closesbranch():
633 633 tip = h
634 634 break
635 635 return tip
636 636
637 637 def branchtip(self, branch):
638 638 '''return the tip node for a given branch'''
639 639 if branch not in self.branchmap():
640 640 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
641 641 return self._branchtip(self.branchmap()[branch])
642 642
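_branchtip picks the newest head that does not close the branch and falls back to the newest head when every head closes it. A standalone sketch of that selection, assuming the heads list is ordered oldest to newest:

# Illustrative only: simplified form of the selection done by _branchtip.
def branchtip(heads, closed):
    """heads: list ordered by revision; closed: set of branch-closing heads."""
    tip = heads[-1]
    for h in reversed(heads):
        if h not in closed:
            tip = h
            break
    return tip

# example: branchtip([10, 12, 15], closed={15}) -> 12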
643 643 def branchtags(self):
644 644 '''return a dict where branch names map to the tipmost head of
645 645 the branch, open heads come before closed'''
646 646 bt = {}
647 647 for bn, heads in self.branchmap().iteritems():
648 648 bt[bn] = self._branchtip(heads)
649 649 return bt
650 650
651 651 def _readbranchcache(self):
652 652 partial = {}
653 653 try:
654 654 f = self.opener("cache/branchheads")
655 655 lines = f.read().split('\n')
656 656 f.close()
657 657 except (IOError, OSError):
658 658 return {}, nullid, nullrev
659 659
660 660 try:
661 661 last, lrev = lines.pop(0).split(" ", 1)
662 662 last, lrev = bin(last), int(lrev)
663 663 if lrev >= len(self) or self[lrev].node() != last:
664 664 # invalidate the cache
665 665 raise ValueError('invalidating branch cache (tip differs)')
666 666 for l in lines:
667 667 if not l:
668 668 continue
669 669 node, label = l.split(" ", 1)
670 670 label = encoding.tolocal(label.strip())
671 671 if not node in self:
672 672 raise ValueError('invalidating branch cache because node '+
673 673 '%s does not exist' % node)
674 674 partial.setdefault(label, []).append(bin(node))
675 675 except KeyboardInterrupt:
676 676 raise
677 677 except Exception, inst:
678 678 if self.ui.debugflag:
679 679 self.ui.warn(str(inst), '\n')
680 680 partial, last, lrev = {}, nullid, nullrev
681 681 return partial, last, lrev
682 682
683 683 def _writebranchcache(self, branches, tip, tiprev):
684 684 try:
685 685 f = self.opener("cache/branchheads", "w", atomictemp=True)
686 686 f.write("%s %s\n" % (hex(tip), tiprev))
687 687 for label, nodes in branches.iteritems():
688 688 for node in nodes:
689 689 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
690 690 f.close()
691 691 except (IOError, OSError):
692 692 pass
693 693
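The branch head cache read and written above lives in .hg/cache/branchheads as a "tiphex tiprev" header line followed by one "nodehex branchname" line per head. A standalone sketch of a reader and writer for that layout, using plain open() instead of the repository opener:

# Illustrative only: same layout as cache/branchheads, detached from the repo.
def writebranchheads(path, tiphex, tiprev, branches):
    with open(path, 'w') as f:
        f.write("%s %s\n" % (tiphex, tiprev))
        for label, nodes in sorted(branches.items()):
            for nodehex in nodes:
                f.write("%s %s\n" % (nodehex, label))

def readbranchheads(path):
    branches = {}
    with open(path) as f:
        lines = f.read().split('\n')
    tiphex, tiprev = lines.pop(0).split(" ", 1)
    for line in lines:
        if not line:
            continue
        nodehex, label = line.split(" ", 1)
        branches.setdefault(label.strip(), []).append(nodehex)
    return tiphex, int(tiprev), branches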
694 694 def _updatebranchcache(self, partial, ctxgen):
695 695 """Given a branchhead cache, partial, that may have extra nodes or be
696 696 missing heads, and a generator of contexts covering at least the
697 697 missing heads, this function updates partial to be correct.
698 698 """
699 699 # collect new branch entries
700 700 newbranches = {}
701 701 for c in ctxgen:
702 702 newbranches.setdefault(c.branch(), []).append(c.node())
703 703 # if older branchheads are reachable from new ones, they aren't
704 704 # really branchheads. Note checking parents is insufficient:
705 705 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
706 706 for branch, newnodes in newbranches.iteritems():
707 707 bheads = partial.setdefault(branch, [])
708 708 # Remove candidate heads that no longer are in the repo (e.g., as
709 709 # the result of a strip that just happened). Avoid using 'node in
710 710 # self' here because that dives down into branchcache code somewhat
711 711 # recursively.
712 712 bheadrevs = [self.changelog.rev(node) for node in bheads
713 713 if self.changelog.hasnode(node)]
714 714 newheadrevs = [self.changelog.rev(node) for node in newnodes
715 715 if self.changelog.hasnode(node)]
716 716 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
717 717 # Remove duplicates - nodes that are in newheadrevs and are already
718 718 # in bheadrevs. This can happen if you strip a node whose parent
719 719 # was already a head (because they're on different branches).
720 720 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
721 721
722 722 # Starting from tip means fewer passes over reachable. If we know
723 723 # the new candidates are not ancestors of existing heads, we don't
724 724 # have to examine ancestors of existing heads
725 725 if ctxisnew:
726 726 iterrevs = sorted(newheadrevs)
727 727 else:
728 728 iterrevs = list(bheadrevs)
729 729
730 730 # This loop prunes out two kinds of heads - heads that are
731 731 # superseded by a head in newheadrevs, and newheadrevs that are not
732 732 # heads because an existing head is their descendant.
733 733 while iterrevs:
734 734 latest = iterrevs.pop()
735 735 if latest not in bheadrevs:
736 736 continue
737 737 ancestors = set(self.changelog.ancestors([latest],
738 738 bheadrevs[0]))
739 739 if ancestors:
740 740 bheadrevs = [b for b in bheadrevs if b not in ancestors]
741 741 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
742 742
743 743 # There may be branches that cease to exist when the last commit in the
744 744 # branch was stripped. This code filters them out. Note that the
745 745 # branch that ceased to exist may not be in newbranches because
746 746 # newbranches is the set of candidate heads, and after stripping the
747 747 # last commit of a branch the candidate head belongs to the parent branch.
748 748 for branch in partial.keys():
749 749 nodes = [head for head in partial[branch]
750 750 if self.changelog.hasnode(head)]
751 751 if not nodes:
752 752 del partial[branch]
753 753
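The pruning loop in _updatebranchcache removes candidates that are ancestors of other heads on the same branch. A standalone sketch of that idea over a plain parent map, ignoring the revlog-specific optimisations (starting from tip, bounding the ancestor walk at bheadrevs[0]):

# Illustrative only: naive head pruning over a rev -> parents mapping.
def prune_heads(candidates, parents):
    heads = set(candidates)
    for rev in sorted(candidates, reverse=True):
        stack = list(parents.get(rev, []))
        seen = set()
        while stack:
            p = stack.pop()
            if p in seen:
                continue
            seen.add(p)
            heads.discard(p)          # an ancestor of a candidate is not a head
            stack.extend(parents.get(p, []))
    return sorted(heads)

# example: history 0 <- 1 <- 2 with a fork 1 <- 3
# prune_heads([0, 1, 2, 3], {1: [0], 2: [1], 3: [1]}) -> [2, 3]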
754 754 def lookup(self, key):
755 755 return self[key].node()
756 756
757 757 def lookupbranch(self, key, remote=None):
758 758 repo = remote or self
759 759 if key in repo.branchmap():
760 760 return key
761 761
762 762 repo = (remote and remote.local()) and remote or self
763 763 return repo[key].branch()
764 764
765 765 def known(self, nodes):
766 766 nm = self.changelog.nodemap
767 767 pc = self._phasecache
768 768 result = []
769 769 for n in nodes:
770 770 r = nm.get(n)
771 771 resp = not (r is None or pc.phase(self, r) >= phases.secret)
772 772 result.append(resp)
773 773 return result
774 774
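known() answers a remote's membership query: a node counts as known only if it exists locally and its phase is below secret, so secret changesets stay invisible during discovery. A standalone sketch, with the secret phase number assumed to be 2 as in phases.py:

# Illustrative only: detached version of the known() filtering above.
SECRET = 2  # assumed value of phases.secret

def known(nodes, nodemap, phaseof):
    """nodemap: node -> rev or None; phaseof: rev -> phase number."""
    result = []
    for n in nodes:
        r = nodemap.get(n)
        result.append(r is not None and phaseof(r) < SECRET)
    return result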
775 775 def local(self):
776 776 return self
777 777
778 778 def cancopy(self):
779 779 return self.local() # so statichttprepo's override of local() works
780 780
781 781 def join(self, f):
782 782 return os.path.join(self.path, f)
783 783
784 784 def wjoin(self, f):
785 785 return os.path.join(self.root, f)
786 786
787 787 def file(self, f):
788 788 if f[0] == '/':
789 789 f = f[1:]
790 790 return filelog.filelog(self.sopener, f)
791 791
792 792 def changectx(self, changeid):
793 793 return self[changeid]
794 794
795 795 def parents(self, changeid=None):
796 796 '''get list of changectxs for parents of changeid'''
797 797 return self[changeid].parents()
798 798
799 799 def setparents(self, p1, p2=nullid):
800 800 copies = self.dirstate.setparents(p1, p2)
801 801 if copies:
802 802 # Adjust copy records, the dirstate cannot do it, it
803 803 # requires access to parents manifests. Preserve them
804 804 # only for entries added to first parent.
805 805 pctx = self[p1]
806 806 for f in copies:
807 807 if f not in pctx and copies[f] in pctx:
808 808 self.dirstate.copy(copies[f], f)
809 809
810 810 def filectx(self, path, changeid=None, fileid=None):
811 811 """changeid can be a changeset revision, node, or tag.
812 812 fileid can be a file revision or node."""
813 813 return context.filectx(self, path, changeid, fileid)
814 814
815 815 def getcwd(self):
816 816 return self.dirstate.getcwd()
817 817
818 818 def pathto(self, f, cwd=None):
819 819 return self.dirstate.pathto(f, cwd)
820 820
821 821 def wfile(self, f, mode='r'):
822 822 return self.wopener(f, mode)
823 823
824 824 def _link(self, f):
825 825 return os.path.islink(self.wjoin(f))
826 826
827 827 def _loadfilter(self, filter):
828 828 if filter not in self.filterpats:
829 829 l = []
830 830 for pat, cmd in self.ui.configitems(filter):
831 831 if cmd == '!':
832 832 continue
833 833 mf = matchmod.match(self.root, '', [pat])
834 834 fn = None
835 835 params = cmd
836 836 for name, filterfn in self._datafilters.iteritems():
837 837 if cmd.startswith(name):
838 838 fn = filterfn
839 839 params = cmd[len(name):].lstrip()
840 840 break
841 841 if not fn:
842 842 fn = lambda s, c, **kwargs: util.filter(s, c)
843 843 # Wrap old filters not supporting keyword arguments
844 844 if not inspect.getargspec(fn)[2]:
845 845 oldfn = fn
846 846 fn = lambda s, c, **kwargs: oldfn(s, c)
847 847 l.append((mf, fn, params))
848 848 self.filterpats[filter] = l
849 849 return self.filterpats[filter]
850 850
851 851 def _filter(self, filterpats, filename, data):
852 852 for mf, fn, cmd in filterpats:
853 853 if mf(filename):
854 854 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
855 855 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
856 856 break
857 857
858 858 return data
859 859
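_loadfilter and _filter implement the encode/decode filter chain: each configured pattern maps to a command or registered filter function, and the first pattern matching the filename transforms the data. A standalone sketch using fnmatch in place of matchmod and plain callables in place of shell commands:

# Illustrative only: simplified filter dispatch, not the matchmod-based one.
import fnmatch

def loadfilters(config):
    """config: list of (glob pattern, filter callable) pairs; skip disabled."""
    return [(pat, fn) for pat, fn in config if fn is not None]

def filterdata(filterpats, filename, data):
    for pat, fn in filterpats:
        if fnmatch.fnmatch(filename, pat):
            return fn(data)     # first matching pattern wins
    return data

# example: normalise CRLF line endings for text files
filters = loadfilters([('*.txt', lambda d: d.replace(b'\r\n', b'\n'))])
# filterdata(filters, 'notes.txt', b'a\r\nb\r\n') == b'a\nb\n'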
860 860 @propertycache
861 861 def _encodefilterpats(self):
862 862 return self._loadfilter('encode')
863 863
864 864 @propertycache
865 865 def _decodefilterpats(self):
866 866 return self._loadfilter('decode')
867 867
868 868 def adddatafilter(self, name, filter):
869 869 self._datafilters[name] = filter
870 870
871 871 def wread(self, filename):
872 872 if self._link(filename):
873 873 data = os.readlink(self.wjoin(filename))
874 874 else:
875 875 data = self.wopener.read(filename)
876 876 return self._filter(self._encodefilterpats, filename, data)
877 877
878 878 def wwrite(self, filename, data, flags):
879 879 data = self._filter(self._decodefilterpats, filename, data)
880 880 if 'l' in flags:
881 881 self.wopener.symlink(data, filename)
882 882 else:
883 883 self.wopener.write(filename, data)
884 884 if 'x' in flags:
885 885 util.setflags(self.wjoin(filename), False, True)
886 886
887 887 def wwritedata(self, filename, data):
888 888 return self._filter(self._decodefilterpats, filename, data)
889 889
890 890 def transaction(self, desc):
891 891 tr = self._transref and self._transref() or None
892 892 if tr and tr.running():
893 893 return tr.nest()
894 894
895 895 # abort here if the journal already exists
896 896 if os.path.exists(self.sjoin("journal")):
897 897 raise error.RepoError(
898 898 _("abandoned transaction found - run hg recover"))
899 899
900 900 self._writejournal(desc)
901 901 renames = [(x, undoname(x)) for x in self._journalfiles()]
902 902
903 903 tr = transaction.transaction(self.ui.warn, self.sopener,
904 904 self.sjoin("journal"),
905 905 aftertrans(renames),
906 906 self.store.createmode)
907 907 self._transref = weakref.ref(tr)
908 908 return tr
909 909
910 910 def _journalfiles(self):
911 911 return (self.sjoin('journal'), self.join('journal.dirstate'),
912 912 self.join('journal.branch'), self.join('journal.desc'),
913 913 self.join('journal.bookmarks'),
914 914 self.sjoin('journal.phaseroots'))
915 915
916 916 def undofiles(self):
917 917 return [undoname(x) for x in self._journalfiles()]
918 918
919 919 def _writejournal(self, desc):
920 920 self.opener.write("journal.dirstate",
921 921 self.opener.tryread("dirstate"))
922 922 self.opener.write("journal.branch",
923 923 encoding.fromlocal(self.dirstate.branch()))
924 924 self.opener.write("journal.desc",
925 925 "%d\n%s\n" % (len(self), desc))
926 926 self.opener.write("journal.bookmarks",
927 927 self.opener.tryread("bookmarks"))
928 928 self.sopener.write("journal.phaseroots",
929 929 self.sopener.tryread("phaseroots"))
930 930
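transaction() and _writejournal() snapshot the repository's state files to journal.* before anything is modified; aftertrans renames them to undo.* when the transaction commits, and recover() restores from the journal after an interruption. A standalone sketch of that journal/undo dance, with a hypothetical STATEFILES list standing in for _journalfiles():

# Illustrative only: the file list and layout here are simplified assumptions.
import os, shutil

STATEFILES = ['dirstate', 'bookmarks']     # hypothetical subset

def writejournal(repodir):
    for name in STATEFILES:
        src = os.path.join(repodir, name)
        if os.path.exists(src):
            shutil.copyfile(src, os.path.join(repodir, 'journal.' + name))

def committransaction(repodir):
    # on success the journal snapshot becomes the new undo data
    for name in STATEFILES:
        j = os.path.join(repodir, 'journal.' + name)
        if os.path.exists(j):
            os.rename(j, os.path.join(repodir, 'undo.' + name))

def recover(repodir):
    # an interrupted transaction left journal.* behind; restore from it
    restored = False
    for name in STATEFILES:
        j = os.path.join(repodir, 'journal.' + name)
        if os.path.exists(j):
            shutil.copyfile(j, os.path.join(repodir, name))
            os.remove(j)
            restored = True
    return restored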
931 931 def recover(self):
932 932 lock = self.lock()
933 933 try:
934 934 if os.path.exists(self.sjoin("journal")):
935 935 self.ui.status(_("rolling back interrupted transaction\n"))
936 936 transaction.rollback(self.sopener, self.sjoin("journal"),
937 937 self.ui.warn)
938 938 self.invalidate()
939 939 return True
940 940 else:
941 941 self.ui.warn(_("no interrupted transaction available\n"))
942 942 return False
943 943 finally:
944 944 lock.release()
945 945
946 946 def rollback(self, dryrun=False, force=False):
947 947 wlock = lock = None
948 948 try:
949 949 wlock = self.wlock()
950 950 lock = self.lock()
951 951 if os.path.exists(self.sjoin("undo")):
952 952 return self._rollback(dryrun, force)
953 953 else:
954 954 self.ui.warn(_("no rollback information available\n"))
955 955 return 1
956 956 finally:
957 957 release(lock, wlock)
958 958
959 959 def _rollback(self, dryrun, force):
960 960 ui = self.ui
961 961 try:
962 962 args = self.opener.read('undo.desc').splitlines()
963 963 (oldlen, desc, detail) = (int(args[0]), args[1], None)
964 964 if len(args) >= 3:
965 965 detail = args[2]
966 966 oldtip = oldlen - 1
967 967
968 968 if detail and ui.verbose:
969 969 msg = (_('repository tip rolled back to revision %s'
970 970 ' (undo %s: %s)\n')
971 971 % (oldtip, desc, detail))
972 972 else:
973 973 msg = (_('repository tip rolled back to revision %s'
974 974 ' (undo %s)\n')
975 975 % (oldtip, desc))
976 976 except IOError:
977 977 msg = _('rolling back unknown transaction\n')
978 978 desc = None
979 979
980 980 if not force and self['.'] != self['tip'] and desc == 'commit':
981 981 raise util.Abort(
982 982 _('rollback of last commit while not checked out '
983 983 'may lose data'), hint=_('use -f to force'))
984 984
985 985 ui.status(msg)
986 986 if dryrun:
987 987 return 0
988 988
989 989 parents = self.dirstate.parents()
990 990 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
991 991 if os.path.exists(self.join('undo.bookmarks')):
992 992 util.rename(self.join('undo.bookmarks'),
993 993 self.join('bookmarks'))
994 994 if os.path.exists(self.sjoin('undo.phaseroots')):
995 995 util.rename(self.sjoin('undo.phaseroots'),
996 996 self.sjoin('phaseroots'))
997 997 self.invalidate()
998 998
999 999 parentgone = (parents[0] not in self.changelog.nodemap or
1000 1000 parents[1] not in self.changelog.nodemap)
1001 1001 if parentgone:
1002 1002 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1003 1003 try:
1004 1004 branch = self.opener.read('undo.branch')
1005 1005 self.dirstate.setbranch(branch)
1006 1006 except IOError:
1007 1007 ui.warn(_('named branch could not be reset: '
1008 1008 'current branch is still \'%s\'\n')
1009 1009 % self.dirstate.branch())
1010 1010
1011 1011 self.dirstate.invalidate()
1012 1012 parents = tuple([p.rev() for p in self.parents()])
1013 1013 if len(parents) > 1:
1014 1014 ui.status(_('working directory now based on '
1015 1015 'revisions %d and %d\n') % parents)
1016 1016 else:
1017 1017 ui.status(_('working directory now based on '
1018 1018 'revision %d\n') % parents)
1019 1019 # TODO: if we know which new heads may result from this rollback, pass
1020 1020 # them to destroy(), which will prevent the branchhead cache from being
1021 1021 # invalidated.
1022 1022 self.destroyed()
1023 1023 return 0
1024 1024
1025 1025 def invalidatecaches(self):
1026 1026 def delcache(name):
1027 1027 try:
1028 1028 delattr(self, name)
1029 1029 except AttributeError:
1030 1030 pass
1031 1031
1032 1032 delcache('_tagscache')
1033 1033
1034 1034 self._branchcache = None # in UTF-8
1035 1035 self._branchcachetip = None
1036 1036
1037 1037 def invalidatedirstate(self):
1038 1038 '''Invalidates the dirstate, causing the next call to dirstate
1039 1039 to check if it was modified since the last time it was read,
1040 1040 rereading it if it has.
1041 1041
1042 1042 This is different from dirstate.invalidate() in that it doesn't
1043 1043 always reread the dirstate. Use dirstate.invalidate() if you want to
1044 1044 explicitly read the dirstate again (i.e. restoring it to a previous
1045 1045 known good state).'''
1046 1046 if 'dirstate' in self.__dict__:
1047 1047 for k in self.dirstate._filecache:
1048 1048 try:
1049 1049 delattr(self.dirstate, k)
1050 1050 except AttributeError:
1051 1051 pass
1052 1052 delattr(self, 'dirstate')
1053 1053
1054 1054 def invalidate(self):
1055 1055 for k in self._filecache:
1056 1056 # dirstate is invalidated separately in invalidatedirstate()
1057 1057 if k == 'dirstate':
1058 1058 continue
1059 1059
1060 1060 try:
1061 1061 delattr(self, k)
1062 1062 except AttributeError:
1063 1063 pass
1064 1064 self.invalidatecaches()
1065 1065
1066 1066 # Discard all cache entries to force reloading everything.
1067 1067 self._filecache.clear()
1068 1068
1069 1069 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1070 1070 try:
1071 1071 l = lock.lock(lockname, 0, releasefn, desc=desc)
1072 1072 except error.LockHeld, inst:
1073 1073 if not wait:
1074 1074 raise
1075 1075 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1076 1076 (desc, inst.locker))
1077 1077 # default to 600 seconds timeout
1078 1078 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1079 1079 releasefn, desc=desc)
1080 1080 if acquirefn:
1081 1081 acquirefn()
1082 1082 return l
1083 1083
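_lock first tries a non-blocking acquire and, only when the caller is willing to wait, retries with a timeout taken from ui.timeout (600 seconds by default). A standalone sketch of that two-step pattern using a simple O_EXCL lock file, which is an assumption rather than lock.lock's actual mechanism:

# Illustrative only: lock-file based stand-in for lock.lock.
import os, time, errno

def trylock(path):
    try:
        fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        os.write(fd, ("%d" % os.getpid()).encode('ascii'))
        os.close(fd)
        return True
    except OSError as inst:
        if inst.errno != errno.EEXIST:
            raise
        return False

def acquirelock(path, wait=True, timeout=600):
    if trylock(path):
        return
    if not wait:
        raise RuntimeError('lock %r is held' % path)
    deadline = time.time() + timeout
    while time.time() < deadline:
        time.sleep(0.1)
        if trylock(path):
            return
    raise RuntimeError('timed out waiting for lock %r' % path)

def releaselock(path):
    os.unlink(path)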
1084 1084 def _afterlock(self, callback):
1085 1085 """add a callback to the current repository lock.
1086 1086
1087 1087 The callback will be executed on lock release."""
1088 1088 l = self._lockref and self._lockref()
1089 1089 if l:
1090 1090 l.postrelease.append(callback)
1091 1091 else:
1092 1092 callback()
1093 1093
1094 1094 def lock(self, wait=True):
1095 1095 '''Lock the repository store (.hg/store) and return a weak reference
1096 1096 to the lock. Use this before modifying the store (e.g. committing or
1097 1097 stripping). If you are opening a transaction, get a lock as well.'''
1098 1098 l = self._lockref and self._lockref()
1099 1099 if l is not None and l.held:
1100 1100 l.lock()
1101 1101 return l
1102 1102
1103 1103 def unlock():
1104 1104 self.store.write()
1105 1105 if '_phasecache' in vars(self):
1106 1106 self._phasecache.write()
1107 1107 for k, ce in self._filecache.items():
1108 1108 if k == 'dirstate':
1109 1109 continue
1110 1110 ce.refresh()
1111 1111
1112 1112 l = self._lock(self.sjoin("lock"), wait, unlock,
1113 1113 self.invalidate, _('repository %s') % self.origroot)
1114 1114 self._lockref = weakref.ref(l)
1115 1115 return l
1116 1116
1117 1117 def wlock(self, wait=True):
1118 1118 '''Lock the non-store parts of the repository (everything under
1119 1119 .hg except .hg/store) and return a weak reference to the lock.
1120 1120 Use this before modifying files in .hg.'''
1121 1121 l = self._wlockref and self._wlockref()
1122 1122 if l is not None and l.held:
1123 1123 l.lock()
1124 1124 return l
1125 1125
1126 1126 def unlock():
1127 1127 self.dirstate.write()
1128 1128 ce = self._filecache.get('dirstate')
1129 1129 if ce:
1130 1130 ce.refresh()
1131 1131
1132 1132 l = self._lock(self.join("wlock"), wait, unlock,
1133 1133 self.invalidatedirstate, _('working directory of %s') %
1134 1134 self.origroot)
1135 1135 self._wlockref = weakref.ref(l)
1136 1136 return l
1137 1137
1138 1138 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1139 1139 """
1140 1140 commit an individual file as part of a larger transaction
1141 1141 """
1142 1142
1143 1143 fname = fctx.path()
1144 1144 text = fctx.data()
1145 1145 flog = self.file(fname)
1146 1146 fparent1 = manifest1.get(fname, nullid)
1147 1147 fparent2 = fparent2o = manifest2.get(fname, nullid)
1148 1148
1149 1149 meta = {}
1150 1150 copy = fctx.renamed()
1151 1151 if copy and copy[0] != fname:
1152 1152 # Mark the new revision of this file as a copy of another
1153 1153 # file. This copy data will effectively act as a parent
1154 1154 # of this new revision. If this is a merge, the first
1155 1155 # parent will be the nullid (meaning "look up the copy data")
1156 1156 # and the second one will be the other parent. For example:
1157 1157 #
1158 1158 # 0 --- 1 --- 3 rev1 changes file foo
1159 1159 # \ / rev2 renames foo to bar and changes it
1160 1160 # \- 2 -/ rev3 should have bar with all changes and
1161 1161 # should record that bar descends from
1162 1162 # bar in rev2 and foo in rev1
1163 1163 #
1164 1164 # this allows this merge to succeed:
1165 1165 #
1166 1166 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1167 1167 # \ / merging rev3 and rev4 should use bar@rev2
1168 1168 # \- 2 --- 4 as the merge base
1169 1169 #
1170 1170
1171 1171 cfname = copy[0]
1172 1172 crev = manifest1.get(cfname)
1173 1173 newfparent = fparent2
1174 1174
1175 1175 if manifest2: # branch merge
1176 1176 if fparent2 == nullid or crev is None: # copied on remote side
1177 1177 if cfname in manifest2:
1178 1178 crev = manifest2[cfname]
1179 1179 newfparent = fparent1
1180 1180
1181 1181 # find source in nearest ancestor if we've lost track
1182 1182 if not crev:
1183 1183 self.ui.debug(" %s: searching for copy revision for %s\n" %
1184 1184 (fname, cfname))
1185 1185 for ancestor in self[None].ancestors():
1186 1186 if cfname in ancestor:
1187 1187 crev = ancestor[cfname].filenode()
1188 1188 break
1189 1189
1190 1190 if crev:
1191 1191 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1192 1192 meta["copy"] = cfname
1193 1193 meta["copyrev"] = hex(crev)
1194 1194 fparent1, fparent2 = nullid, newfparent
1195 1195 else:
1196 1196 self.ui.warn(_("warning: can't find ancestor for '%s' "
1197 1197 "copied from '%s'!\n") % (fname, cfname))
1198 1198
1199 1199 elif fparent2 != nullid:
1200 1200 # is one parent an ancestor of the other?
1201 1201 fparentancestor = flog.ancestor(fparent1, fparent2)
1202 1202 if fparentancestor == fparent1:
1203 1203 fparent1, fparent2 = fparent2, nullid
1204 1204 elif fparentancestor == fparent2:
1205 1205 fparent2 = nullid
1206 1206
1207 1207 # is the file changed?
1208 1208 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1209 1209 changelist.append(fname)
1210 1210 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1211 1211
1212 1212 # are just the flags changed during merge?
1213 1213 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1214 1214 changelist.append(fname)
1215 1215
1216 1216 return fparent1
1217 1217
1218 1218 def commit(self, text="", user=None, date=None, match=None, force=False,
1219 1219 editor=False, extra={}):
1220 1220 """Add a new revision to current repository.
1221 1221
1222 1222 Revision information is gathered from the working directory,
1223 1223 match can be used to filter the committed files. If editor is
1224 1224 supplied, it is called to get a commit message.
1225 1225 """
1226 1226
1227 1227 def fail(f, msg):
1228 1228 raise util.Abort('%s: %s' % (f, msg))
1229 1229
1230 1230 if not match:
1231 1231 match = matchmod.always(self.root, '')
1232 1232
1233 1233 if not force:
1234 1234 vdirs = []
1235 1235 match.dir = vdirs.append
1236 1236 match.bad = fail
1237 1237
1238 1238 wlock = self.wlock()
1239 1239 try:
1240 1240 wctx = self[None]
1241 1241 merge = len(wctx.parents()) > 1
1242 1242
1243 1243 if (not force and merge and match and
1244 1244 (match.files() or match.anypats())):
1245 1245 raise util.Abort(_('cannot partially commit a merge '
1246 1246 '(do not specify files or patterns)'))
1247 1247
1248 1248 changes = self.status(match=match, clean=force)
1249 1249 if force:
1250 1250 changes[0].extend(changes[6]) # mq may commit unchanged files
1251 1251
1252 1252 # check subrepos
1253 1253 subs = []
1254 1254 commitsubs = set()
1255 1255 newstate = wctx.substate.copy()
1256 1256 # only manage subrepos and .hgsubstate if .hgsub is present
1257 1257 if '.hgsub' in wctx:
1258 1258 # we'll decide whether to track this ourselves, thanks
1259 1259 if '.hgsubstate' in changes[0]:
1260 1260 changes[0].remove('.hgsubstate')
1261 1261 if '.hgsubstate' in changes[2]:
1262 1262 changes[2].remove('.hgsubstate')
1263 1263
1264 1264 # compare current state to last committed state
1265 1265 # build new substate based on last committed state
1266 1266 oldstate = wctx.p1().substate
1267 1267 for s in sorted(newstate.keys()):
1268 1268 if not match(s):
1269 1269 # ignore working copy, use old state if present
1270 1270 if s in oldstate:
1271 1271 newstate[s] = oldstate[s]
1272 1272 continue
1273 1273 if not force:
1274 1274 raise util.Abort(
1275 1275 _("commit with new subrepo %s excluded") % s)
1276 1276 if wctx.sub(s).dirty(True):
1277 1277 if not self.ui.configbool('ui', 'commitsubrepos'):
1278 1278 raise util.Abort(
1279 1279 _("uncommitted changes in subrepo %s") % s,
1280 1280 hint=_("use --subrepos for recursive commit"))
1281 1281 subs.append(s)
1282 1282 commitsubs.add(s)
1283 1283 else:
1284 1284 bs = wctx.sub(s).basestate()
1285 1285 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1286 1286 if oldstate.get(s, (None, None, None))[1] != bs:
1287 1287 subs.append(s)
1288 1288
1289 1289 # check for removed subrepos
1290 1290 for p in wctx.parents():
1291 1291 r = [s for s in p.substate if s not in newstate]
1292 1292 subs += [s for s in r if match(s)]
1293 1293 if subs:
1294 1294 if (not match('.hgsub') and
1295 1295 '.hgsub' in (wctx.modified() + wctx.added())):
1296 1296 raise util.Abort(
1297 1297 _("can't commit subrepos without .hgsub"))
1298 1298 changes[0].insert(0, '.hgsubstate')
1299 1299
1300 1300 elif '.hgsub' in changes[2]:
1301 1301 # clean up .hgsubstate when .hgsub is removed
1302 1302 if ('.hgsubstate' in wctx and
1303 1303 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1304 1304 changes[2].insert(0, '.hgsubstate')
1305 1305
1306 1306 # make sure all explicit patterns are matched
1307 1307 if not force and match.files():
1308 1308 matched = set(changes[0] + changes[1] + changes[2])
1309 1309
1310 1310 for f in match.files():
1311 1311 if f == '.' or f in matched or f in wctx.substate:
1312 1312 continue
1313 1313 if f in changes[3]: # missing
1314 1314 fail(f, _('file not found!'))
1315 1315 if f in vdirs: # visited directory
1316 1316 d = f + '/'
1317 1317 for mf in matched:
1318 1318 if mf.startswith(d):
1319 1319 break
1320 1320 else:
1321 1321 fail(f, _("no match under directory!"))
1322 1322 elif f not in self.dirstate:
1323 1323 fail(f, _("file not tracked!"))
1324 1324
1325 1325 if (not force and not extra.get("close") and not merge
1326 1326 and not (changes[0] or changes[1] or changes[2])
1327 1327 and wctx.branch() == wctx.p1().branch()):
1328 1328 return None
1329 1329
1330 1330 if merge and changes[3]:
1331 1331 raise util.Abort(_("cannot commit merge with missing files"))
1332 1332
1333 1333 ms = mergemod.mergestate(self)
1334 1334 for f in changes[0]:
1335 1335 if f in ms and ms[f] == 'u':
1336 1336 raise util.Abort(_("unresolved merge conflicts "
1337 1337 "(see hg help resolve)"))
1338 1338
1339 1339 cctx = context.workingctx(self, text, user, date, extra, changes)
1340 1340 if editor:
1341 1341 cctx._text = editor(self, cctx, subs)
1342 1342 edited = (text != cctx._text)
1343 1343
1344 1344 # commit subs and write new state
1345 1345 if subs:
1346 1346 for s in sorted(commitsubs):
1347 1347 sub = wctx.sub(s)
1348 1348 self.ui.status(_('committing subrepository %s\n') %
1349 1349 subrepo.subrelpath(sub))
1350 1350 sr = sub.commit(cctx._text, user, date)
1351 1351 newstate[s] = (newstate[s][0], sr)
1352 1352 subrepo.writestate(self, newstate)
1353 1353
1354 1354 # Save commit message in case this transaction gets rolled back
1355 1355 # (e.g. by a pretxncommit hook). Leave the content alone on
1356 1356 # the assumption that the user will use the same editor again.
1357 1357 msgfn = self.savecommitmessage(cctx._text)
1358 1358
1359 1359 p1, p2 = self.dirstate.parents()
1360 1360 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1361 1361 try:
1362 1362 self.hook("precommit", throw=True, parent1=hookp1,
1363 1363 parent2=hookp2)
1364 1364 ret = self.commitctx(cctx, True)
1365 1365 except: # re-raises
1366 1366 if edited:
1367 1367 self.ui.write(
1368 1368 _('note: commit message saved in %s\n') % msgfn)
1369 1369 raise
1370 1370
1371 1371 # update bookmarks, dirstate and mergestate
1372 1372 bookmarks.update(self, [p1, p2], ret)
1373 1373 for f in changes[0] + changes[1]:
1374 1374 self.dirstate.normal(f)
1375 1375 for f in changes[2]:
1376 1376 self.dirstate.drop(f)
1377 1377 self.dirstate.setparents(ret)
1378 1378 ms.reset()
1379 1379 finally:
1380 1380 wlock.release()
1381 1381
1382 1382 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1383 1383 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1384 1384 self._afterlock(commithook)
1385 1385 return ret
1386 1386
1387 1387 def commitctx(self, ctx, error=False):
1388 1388 """Add a new revision to current repository.
1389 1389 Revision information is passed via the context argument.
1390 1390 """
1391 1391
1392 1392 tr = lock = None
1393 1393 removed = list(ctx.removed())
1394 1394 p1, p2 = ctx.p1(), ctx.p2()
1395 1395 user = ctx.user()
1396 1396
1397 1397 lock = self.lock()
1398 1398 try:
1399 1399 tr = self.transaction("commit")
1400 1400 trp = weakref.proxy(tr)
1401 1401
1402 1402 if ctx.files():
1403 1403 m1 = p1.manifest().copy()
1404 1404 m2 = p2.manifest()
1405 1405
1406 1406 # check in files
1407 1407 new = {}
1408 1408 changed = []
1409 1409 linkrev = len(self)
1410 1410 for f in sorted(ctx.modified() + ctx.added()):
1411 1411 self.ui.note(f + "\n")
1412 1412 try:
1413 1413 fctx = ctx[f]
1414 1414 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1415 1415 changed)
1416 1416 m1.set(f, fctx.flags())
1417 1417 except OSError, inst:
1418 1418 self.ui.warn(_("trouble committing %s!\n") % f)
1419 1419 raise
1420 1420 except IOError, inst:
1421 1421 errcode = getattr(inst, 'errno', errno.ENOENT)
1422 1422 if error or errcode and errcode != errno.ENOENT:
1423 1423 self.ui.warn(_("trouble committing %s!\n") % f)
1424 1424 raise
1425 1425 else:
1426 1426 removed.append(f)
1427 1427
1428 1428 # update manifest
1429 1429 m1.update(new)
1430 1430 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1431 1431 drop = [f for f in removed if f in m1]
1432 1432 for f in drop:
1433 1433 del m1[f]
1434 1434 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1435 1435 p2.manifestnode(), (new, drop))
1436 1436 files = changed + removed
1437 1437 else:
1438 1438 mn = p1.manifestnode()
1439 1439 files = []
1440 1440
1441 1441 # update changelog
1442 1442 self.changelog.delayupdate()
1443 1443 n = self.changelog.add(mn, files, ctx.description(),
1444 1444 trp, p1.node(), p2.node(),
1445 1445 user, ctx.date(), ctx.extra().copy())
1446 1446 p = lambda: self.changelog.writepending() and self.root or ""
1447 1447 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1448 1448 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1449 1449 parent2=xp2, pending=p)
1450 1450 self.changelog.finalize(trp)
1451 1451 # set the new commit in its proper phase
1452 1452 targetphase = phases.newcommitphase(self.ui)
1453 1453 if targetphase:
1454 1454 # retracting the boundary does not alter parent changesets.
1455 1455 # if a parent has a higher phase, the resulting phase will
1456 1456 # be compliant anyway
1457 1457 #
1458 1458 # if minimal phase was 0 we don't need to retract anything
1459 1459 phases.retractboundary(self, targetphase, [n])
1460 1460 tr.close()
1461 1461 self.updatebranchcache()
1462 1462 return n
1463 1463 finally:
1464 1464 if tr:
1465 1465 tr.release()
1466 1466 lock.release()
1467 1467
1468 1468 def destroyed(self, newheadnodes=None):
1469 1469 '''Inform the repository that nodes have been destroyed.
1470 1470 Intended for use by strip and rollback, so there's a common
1471 1471 place for anything that has to be done after destroying history.
1472 1472
1473 1473 If you know the branch head cache was up to date before nodes were removed
1474 1474 and you also know the set of candidate new heads that may have resulted
1475 1475 from the destruction, you can set newheadnodes. This will enable the
1476 1476 code to update the branchheads cache, rather than having future code
1477 1477 decide it's invalid and regenerate it from scratch.
1478 1478 '''
1479 1479 # If we have info, newheadnodes, on how to update the branch cache, do
1480 1480 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1481 1481 # will be caught the next time it is read.
1482 1482 if newheadnodes:
1483 1483 tiprev = len(self) - 1
1484 1484 ctxgen = (self[node] for node in newheadnodes
1485 1485 if self.changelog.hasnode(node))
1486 1486 self._updatebranchcache(self._branchcache, ctxgen)
1487 1487 self._writebranchcache(self._branchcache, self.changelog.tip(),
1488 1488 tiprev)
1489 1489
1490 1490 # Ensure the persistent tag cache is updated. Doing it now
1491 1491 # means that the tag cache only has to worry about destroyed
1492 1492 # heads immediately after a strip/rollback. That in turn
1493 1493 # guarantees that "cachetip == currenttip" (comparing both rev
1494 1494 # and node) always means no nodes have been added or destroyed.
1495 1495
1496 1496 # XXX this is suboptimal when qrefresh'ing: we strip the current
1497 1497 # head, refresh the tag cache, then immediately add a new head.
1498 1498 # But I think doing it this way is necessary for the "instant
1499 1499 # tag cache retrieval" case to work.
1500 1500 self.invalidatecaches()
1501 1501
1502 1502 def walk(self, match, node=None):
1503 1503 '''
1504 1504 walk recursively through the directory tree or a given
1505 1505 changeset, finding all files matched by the match
1506 1506 function
1507 1507 '''
1508 1508 return self[node].walk(match)
1509 1509
1510 1510 def status(self, node1='.', node2=None, match=None,
1511 1511 ignored=False, clean=False, unknown=False,
1512 1512 listsubrepos=False):
1513 1513 """return status of files between two nodes or node and working
1514 1514 directory.
1515 1515
1516 1516 If node1 is None, use the first dirstate parent instead.
1517 1517 If node2 is None, compare node1 with working directory.
1518 1518 """
1519 1519
1520 1520 def mfmatches(ctx):
1521 1521 mf = ctx.manifest().copy()
1522 1522 if match.always():
1523 1523 return mf
1524 1524 for fn in mf.keys():
1525 1525 if not match(fn):
1526 1526 del mf[fn]
1527 1527 return mf
1528 1528
1529 1529 if isinstance(node1, context.changectx):
1530 1530 ctx1 = node1
1531 1531 else:
1532 1532 ctx1 = self[node1]
1533 1533 if isinstance(node2, context.changectx):
1534 1534 ctx2 = node2
1535 1535 else:
1536 1536 ctx2 = self[node2]
1537 1537
1538 1538 working = ctx2.rev() is None
1539 1539 parentworking = working and ctx1 == self['.']
1540 1540 match = match or matchmod.always(self.root, self.getcwd())
1541 1541 listignored, listclean, listunknown = ignored, clean, unknown
1542 1542
1543 1543 # load earliest manifest first for caching reasons
1544 1544 if not working and ctx2.rev() < ctx1.rev():
1545 1545 ctx2.manifest()
1546 1546
1547 1547 if not parentworking:
1548 1548 def bad(f, msg):
1549 1549 # 'f' may be a directory pattern from 'match.files()',
1550 1550 # so 'f not in ctx1' is not enough
1551 1551 if f not in ctx1 and f not in ctx1.dirs():
1552 1552 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1553 1553 match.bad = bad
1554 1554
1555 1555 if working: # we need to scan the working dir
1556 1556 subrepos = []
1557 1557 if '.hgsub' in self.dirstate:
1558 1558 subrepos = ctx2.substate.keys()
1559 1559 s = self.dirstate.status(match, subrepos, listignored,
1560 1560 listclean, listunknown)
1561 1561 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1562 1562
1563 1563 # check for any possibly clean files
1564 1564 if parentworking and cmp:
1565 1565 fixup = []
1566 1566 # do a full compare of any files that might have changed
1567 1567 for f in sorted(cmp):
1568 1568 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1569 1569 or ctx1[f].cmp(ctx2[f])):
1570 1570 modified.append(f)
1571 1571 else:
1572 1572 fixup.append(f)
1573 1573
1574 1574 # update dirstate for files that are actually clean
1575 1575 if fixup:
1576 1576 if listclean:
1577 1577 clean += fixup
1578 1578
1579 1579 try:
1580 1580 # updating the dirstate is optional
1581 1581 # so we don't wait on the lock
1582 1582 wlock = self.wlock(False)
1583 1583 try:
1584 1584 for f in fixup:
1585 1585 self.dirstate.normal(f)
1586 1586 finally:
1587 1587 wlock.release()
1588 1588 except error.LockError:
1589 1589 pass
1590 1590
1591 1591 if not parentworking:
1592 1592 mf1 = mfmatches(ctx1)
1593 1593 if working:
1594 1594 # we are comparing working dir against non-parent
1595 1595 # generate a pseudo-manifest for the working dir
1596 1596 mf2 = mfmatches(self['.'])
1597 1597 for f in cmp + modified + added:
1598 1598 mf2[f] = None
1599 1599 mf2.set(f, ctx2.flags(f))
1600 1600 for f in removed:
1601 1601 if f in mf2:
1602 1602 del mf2[f]
1603 1603 else:
1604 1604 # we are comparing two revisions
1605 1605 deleted, unknown, ignored = [], [], []
1606 1606 mf2 = mfmatches(ctx2)
1607 1607
1608 1608 modified, added, clean = [], [], []
1609 1609 withflags = mf1.withflags() | mf2.withflags()
1610 1610 for fn in mf2:
1611 1611 if fn in mf1:
1612 1612 if (fn not in deleted and
1613 1613 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1614 1614 (mf1[fn] != mf2[fn] and
1615 1615 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1616 1616 modified.append(fn)
1617 1617 elif listclean:
1618 1618 clean.append(fn)
1619 1619 del mf1[fn]
1620 1620 elif fn not in deleted:
1621 1621 added.append(fn)
1622 1622 removed = mf1.keys()
1623 1623
1624 1624 if working and modified and not self.dirstate._checklink:
1625 1625 # Symlink placeholders may get non-symlink-like contents
1626 1626 # via user error or dereferencing by NFS or Samba servers,
1627 1627 # so we filter out any placeholders that don't look like a
1628 1628 # symlink
1629 1629 sane = []
1630 1630 for f in modified:
1631 1631 if ctx2.flags(f) == 'l':
1632 1632 d = ctx2[f].data()
1633 1633 if len(d) >= 1024 or '\n' in d or util.binary(d):
1634 1634 self.ui.debug('ignoring suspect symlink placeholder'
1635 1635 ' "%s"\n' % f)
1636 1636 continue
1637 1637 sane.append(f)
1638 1638 modified = sane
1639 1639
1640 1640 r = modified, added, removed, deleted, unknown, ignored, clean
1641 1641
1642 1642 if listsubrepos:
1643 1643 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1644 1644 if working:
1645 1645 rev2 = None
1646 1646 else:
1647 1647 rev2 = ctx2.substate[subpath][1]
1648 1648 try:
1649 1649 submatch = matchmod.narrowmatcher(subpath, match)
1650 1650 s = sub.status(rev2, match=submatch, ignored=listignored,
1651 1651 clean=listclean, unknown=listunknown,
1652 1652 listsubrepos=True)
1653 1653 for rfiles, sfiles in zip(r, s):
1654 1654 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1655 1655 except error.LookupError:
1656 1656 self.ui.status(_("skipping missing subrepository: %s\n")
1657 1657 % subpath)
1658 1658
1659 1659 for l in r:
1660 1660 l.sort()
1661 1661 return r
1662 1662
1663 1663 def heads(self, start=None):
1664 1664 heads = self.changelog.heads(start)
1665 1665 # sort the output in rev descending order
1666 1666 return sorted(heads, key=self.changelog.rev, reverse=True)
1667 1667
1668 1668 def branchheads(self, branch=None, start=None, closed=False):
1669 1669 '''return a (possibly filtered) list of heads for the given branch
1670 1670
1671 1671 Heads are returned in topological order, from newest to oldest.
1672 1672 If branch is None, use the dirstate branch.
1673 1673 If start is not None, return only heads reachable from start.
1674 1674 If closed is True, return heads that are marked as closed as well.
1675 1675 '''
1676 1676 if branch is None:
1677 1677 branch = self[None].branch()
1678 1678 branches = self.branchmap()
1679 1679 if branch not in branches:
1680 1680 return []
1681 1681 # the cache returns heads ordered lowest to highest
1682 1682 bheads = list(reversed(branches[branch]))
1683 1683 if start is not None:
1684 1684 # filter out the heads that cannot be reached from startrev
1685 1685 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1686 1686 bheads = [h for h in bheads if h in fbheads]
1687 1687 if not closed:
1688 1688 bheads = [h for h in bheads if not self[h].closesbranch()]
1689 1689 return bheads
1690 1690
1691 1691 def branches(self, nodes):
1692 1692 if not nodes:
1693 1693 nodes = [self.changelog.tip()]
1694 1694 b = []
1695 1695 for n in nodes:
1696 1696 t = n
1697 1697 while True:
1698 1698 p = self.changelog.parents(n)
1699 1699 if p[1] != nullid or p[0] == nullid:
1700 1700 b.append((t, n, p[0], p[1]))
1701 1701 break
1702 1702 n = p[0]
1703 1703 return b
1704 1704
1705 1705 def between(self, pairs):
1706 1706 r = []
1707 1707
1708 1708 for top, bottom in pairs:
1709 1709 n, l, i = top, [], 0
1710 1710 f = 1
1711 1711
1712 1712 while n != bottom and n != nullid:
1713 1713 p = self.changelog.parents(n)[0]
1714 1714 if i == f:
1715 1715 l.append(n)
1716 1716 f = f * 2
1717 1717 n = p
1718 1718 i += 1
1719 1719
1720 1720 r.append(l)
1721 1721
1722 1722 return r
1723 1723
1724 1724 def pull(self, remote, heads=None, force=False):
1725 1725 # don't open a transaction for nothing or you break future useful
1726 1726 # rollback calls
1727 1727 tr = None
1728 1728 trname = 'pull\n' + util.hidepassword(remote.url())
1729 1729 lock = self.lock()
1730 1730 try:
1731 1731 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1732 1732 force=force)
1733 1733 common, fetch, rheads = tmp
1734 1734 if not fetch:
1735 1735 self.ui.status(_("no changes found\n"))
1736 1736 added = []
1737 1737 result = 0
1738 1738 else:
1739 1739 tr = self.transaction(trname)
1740 1740 if heads is None and list(common) == [nullid]:
1741 1741 self.ui.status(_("requesting all changes\n"))
1742 1742 elif heads is None and remote.capable('changegroupsubset'):
1743 1743 # issue1320, avoid a race if remote changed after discovery
1744 1744 heads = rheads
1745 1745
1746 1746 if remote.capable('getbundle'):
1747 1747 cg = remote.getbundle('pull', common=common,
1748 1748 heads=heads or rheads)
1749 1749 elif heads is None:
1750 1750 cg = remote.changegroup(fetch, 'pull')
1751 1751 elif not remote.capable('changegroupsubset'):
1752 1752 raise util.Abort(_("partial pull cannot be done because "
1753 1753 "other repository doesn't support "
1754 1754 "changegroupsubset."))
1755 1755 else:
1756 1756 cg = remote.changegroupsubset(fetch, heads, 'pull')
1757 1757 clstart = len(self.changelog)
1758 1758 result = self.addchangegroup(cg, 'pull', remote.url())
1759 1759 clend = len(self.changelog)
1760 1760 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1761 1761
1762 1762 # compute target subset
1763 1763 if heads is None:
1764 1764 # We pulled everything possible
1765 1765 # sync on everything common
1766 1766 subset = common + added
1767 1767 else:
1768 1768 # We pulled a specific subset
1769 1769 # sync on this subset
1770 1770 subset = heads
1771 1771
1772 1772 # Get remote phases data from remote
1773 1773 remotephases = remote.listkeys('phases')
1774 1774 publishing = bool(remotephases.get('publishing', False))
1775 1775 if remotephases and not publishing:
1776 1776 # remote is new and non-publishing
1777 1777 pheads, _dr = phases.analyzeremotephases(self, subset,
1778 1778 remotephases)
1779 1779 phases.advanceboundary(self, phases.public, pheads)
1780 1780 phases.advanceboundary(self, phases.draft, subset)
1781 1781 else:
1782 1782 # Remote is old or publishing; all common changesets
1783 1783 # should be seen as public
1784 1784 phases.advanceboundary(self, phases.public, subset)
1785 1785
1786 1786 self.ui.debug('fetching remote obsolete markers\n')
1787 1787 remoteobs = remote.listkeys('obsolete')
1788 if 'dump' in remoteobs:
1788 if 'dump0' in remoteobs:
1789 1789 if tr is None:
1790 1790 tr = self.transaction(trname)
1791 data = base85.b85decode(remoteobs['dump'])
1792 self.obsstore.mergemarkers(tr, data)
1791 for key in sorted(remoteobs, reverse=True):
1792 if key.startswith('dump'):
1793 data = base85.b85decode(remoteobs[key])
1794 self.obsstore.mergemarkers(tr, data)
1793 1795 if tr is not None:
1794 1796 tr.close()
1795 1797 finally:
1796 1798 if tr is not None:
1797 1799 tr.release()
1798 1800 lock.release()
1799 1801
1800 1802 return result
1801 1803
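The pull-side change above is the consuming half of the new multi-key scheme: obsolete markers now arrive as several pushkey entries named 'dump0', 'dump1', ..., and every 'dump*' value is decoded and merged, iterating the keys in reverse-sorted order so that 'dump0' is applied last. A minimal sketch of that consuming logic (the helper name _mergeremoteobs is hypothetical; the real code lives inline in pull() above):

def _mergeremoteobs(repo, tr, remoteobs):
    # remoteobs is the dict returned by remote.listkeys('obsolete');
    # any key that does not start with 'dump' is simply ignored.
    for key in sorted(remoteobs, reverse=True):   # ..., 'dump1', 'dump0'
        if key.startswith('dump'):
            data = base85.b85decode(remoteobs[key])
            repo.obsstore.mergemarkers(tr, data)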
1802 1804 def checkpush(self, force, revs):
1803 1805 """Extensions can override this function if additional checks have
1804 1806 to be performed before pushing, or call it if they override push
1805 1807 command.
1806 1808 """
1807 1809 pass
1808 1810
1809 1811 def push(self, remote, force=False, revs=None, newbranch=False):
1810 1812 '''Push outgoing changesets (limited by revs) from the current
1811 1813 repository to remote. Return an integer:
1812 1814 - None means nothing to push
1813 1815 - 0 means HTTP error
1814 1816 - 1 means we pushed and remote head count is unchanged *or*
1815 1817 we have outgoing changesets but refused to push
1816 1818 - other values as described by addchangegroup()
1817 1819 '''
1818 1820 # there are two ways to push to remote repo:
1819 1821 #
1820 1822 # addchangegroup assumes local user can lock remote
1821 1823 # repo (local filesystem, old ssh servers).
1822 1824 #
1823 1825 # unbundle assumes local user cannot lock remote repo (new ssh
1824 1826 # servers, http servers).
1825 1827
1826 1828 if not remote.canpush():
1827 1829 raise util.Abort(_("destination does not support push"))
1828 1830 # get local lock as we might write phase data
1829 1831 locallock = self.lock()
1830 1832 try:
1831 1833 self.checkpush(force, revs)
1832 1834 lock = None
1833 1835 unbundle = remote.capable('unbundle')
1834 1836 if not unbundle:
1835 1837 lock = remote.lock()
1836 1838 try:
1837 1839 # discovery
1838 1840 fci = discovery.findcommonincoming
1839 1841 commoninc = fci(self, remote, force=force)
1840 1842 common, inc, remoteheads = commoninc
1841 1843 fco = discovery.findcommonoutgoing
1842 1844 outgoing = fco(self, remote, onlyheads=revs,
1843 1845 commoninc=commoninc, force=force)
1844 1846
1845 1847
1846 1848 if not outgoing.missing:
1847 1849 # nothing to push
1848 1850 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1849 1851 ret = None
1850 1852 else:
1851 1853 # something to push
1852 1854 if not force:
1853 1855 # if self.obsstore is empty --> no obsolete markers,
1854 1856 # so we can skip the iteration entirely
1855 1857 if self.obsstore:
1856 1858 # these messages are defined here to stay within the 80-char limit
1857 1859 mso = _("push includes an obsolete changeset: %s!")
1858 1860 msu = _("push includes an unstable changeset: %s!")
1859 1861 # If there is at least one obsolete or unstable
1860 1862 # changeset among the missing changesets, then at
1861 1863 # least one of the missing heads will be obsolete or
1862 1864 # unstable. So checking heads only is ok
1863 1865 for node in outgoing.missingheads:
1864 1866 ctx = self[node]
1865 1867 if ctx.obsolete():
1866 1868 raise util.Abort(_(mso) % ctx)
1867 1869 elif ctx.unstable():
1868 1870 raise util.Abort(_(msu) % ctx)
1869 1871 discovery.checkheads(self, remote, outgoing,
1870 1872 remoteheads, newbranch,
1871 1873 bool(inc))
1872 1874
1873 1875 # create a changegroup from local
1874 1876 if revs is None and not outgoing.excluded:
1875 1877 # push everything,
1876 1878 # use the fast path, no race possible on push
1877 1879 cg = self._changegroup(outgoing.missing, 'push')
1878 1880 else:
1879 1881 cg = self.getlocalbundle('push', outgoing)
1880 1882
1881 1883 # apply changegroup to remote
1882 1884 if unbundle:
1883 1885 # local repo finds heads on server, finds out what
1884 1886 # revs it must push. once revs transferred, if server
1885 1887 # finds it has different heads (someone else won
1886 1888 # commit/push race), server aborts.
1887 1889 if force:
1888 1890 remoteheads = ['force']
1889 1891 # ssh: return remote's addchangegroup()
1890 1892 # http: return remote's addchangegroup() or 0 for error
1891 1893 ret = remote.unbundle(cg, remoteheads, 'push')
1892 1894 else:
1893 1895 # we return an integer indicating remote head count
1894 1896 # change
1895 1897 ret = remote.addchangegroup(cg, 'push', self.url())
1896 1898
1897 1899 if ret:
1898 1900 # push succeeded, synchronize target of the push
1899 1901 cheads = outgoing.missingheads
1900 1902 elif revs is None:
1901 1903 # All-out push failed. Synchronize all common
1902 1904 cheads = outgoing.commonheads
1903 1905 else:
1904 1906 # I want cheads = heads(::missingheads and ::commonheads)
1905 1907 # (missingheads is revs with secret changesets filtered out)
1906 1908 #
1907 1909 # This can be expressed as:
1908 1910 # cheads = ( (missingheads and ::commonheads)
1909 1911 #            + (commonheads and ::missingheads) )
1910 1912 #
1911 1913 #
1912 1914 # while trying to push we already computed the following:
1913 1915 # common = (::commonheads)
1914 1916 # missing = ((commonheads::missingheads) - commonheads)
1915 1917 #
1916 1918 # We can pick:
1917 1919 # * missingheads part of common (::commonheads)
1918 1920 common = set(outgoing.common)
1919 1921 cheads = [node for node in revs if node in common]
1920 1922 # and
1921 1923 # * commonheads parents on missing
1922 1924 revset = self.set('%ln and parents(roots(%ln))',
1923 1925 outgoing.commonheads,
1924 1926 outgoing.missing)
1925 1927 cheads.extend(c.node() for c in revset)
1926 1928 # even when we don't push, exchanging phase data is useful
1927 1929 remotephases = remote.listkeys('phases')
1928 1930 if not remotephases: # old server or public only repo
1929 1931 phases.advanceboundary(self, phases.public, cheads)
1930 1932 # don't push any phase data as there is nothing to push
1931 1933 else:
1932 1934 ana = phases.analyzeremotephases(self, cheads, remotephases)
1933 1935 pheads, droots = ana
1934 1936 ### Apply remote phase on local
1935 1937 if remotephases.get('publishing', False):
1936 1938 phases.advanceboundary(self, phases.public, cheads)
1937 1939 else: # publish = False
1938 1940 phases.advanceboundary(self, phases.public, pheads)
1939 1941 phases.advanceboundary(self, phases.draft, cheads)
1940 1942 ### Apply local phase on remote
1941 1943
1942 1944 # Get the list of all revs that are draft on remote but public here.
1943 1945 # XXX Beware that the revset breaks if droots is not strictly
1944 1946 # XXX roots; we may want to ensure it is, but that is costly
1945 1947 outdated = self.set('heads((%ln::%ln) and public())',
1946 1948 droots, cheads)
1947 1949 for newremotehead in outdated:
1948 1950 r = remote.pushkey('phases',
1949 1951 newremotehead.hex(),
1950 1952 str(phases.draft),
1951 1953 str(phases.public))
1952 1954 if not r:
1953 1955 self.ui.warn(_('updating %s to public failed!\n')
1954 1956 % newremotehead)
1955 1957 self.ui.debug('try to push obsolete markers to remote\n')
1956 1958 if (self.obsstore and
1957 1959 'obsolete' in remote.listkeys('namespaces')):
1958 data = self.listkeys('obsolete')['dump']
1959 r = remote.pushkey('obsolete', 'dump', '', data)
1960 if not r:
1961 self.ui.warn(_('failed to push obsolete markers!\n'))
1960 rslts = []
1961 remotedata = self.listkeys('obsolete')
1962 for key in sorted(remotedata, reverse=True):
1963 # reverse sort to ensure we end with dump0
1964 data = remotedata[key]
1965 rslts.append(remote.pushkey('obsolete', key, '', data))
1966 if [r for r in rslts if not r]:
1967 msg = _('failed to push some obsolete markers!\n')
1968 self.ui.warn(msg)
1962 1969 finally:
1963 1970 if lock is not None:
1964 1971 lock.release()
1965 1972 finally:
1966 1973 locallock.release()
1967 1974
1968 1975 self.ui.debug("checking for updated bookmarks\n")
1969 1976 rb = remote.listkeys('bookmarks')
1970 1977 for k in rb.keys():
1971 1978 if k in self._bookmarks:
1972 1979 nr, nl = rb[k], hex(self._bookmarks[k])
1973 1980 if nr in self:
1974 1981 cr = self[nr]
1975 1982 cl = self[nl]
1976 1983 if cl in cr.descendants():
1977 1984 r = remote.pushkey('bookmarks', k, nr, nl)
1978 1985 if r:
1979 1986 self.ui.status(_("updating bookmark %s\n") % k)
1980 1987 else:
1981 1988 self.ui.warn(_('updating bookmark %s'
1982 1989 ' failed!\n') % k)
1983 1990
1984 1991 return ret
1985 1992
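The push-side counterpart above re-publishes the repository's own 'obsolete' listkeys output: each 'dumpN' value is pushed with an empty old value, and a failing key is reported without aborting the push. A rough sketch of the same idea, again with a hypothetical helper name (_pushobsmarkers) wrapping what push() does inline:

def _pushobsmarkers(repo, remote):
    if not (repo.obsstore and 'obsolete' in remote.listkeys('namespaces')):
        return
    localdata = repo.listkeys('obsolete')        # {'dump0': ..., 'dump1': ...}
    results = []
    for key in sorted(localdata, reverse=True):  # reverse sort: end with dump0
        results.append(remote.pushkey('obsolete', key, '', localdata[key]))
    if not all(results):
        repo.ui.warn(_('failed to push some obsolete markers!\n'))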
1986 1993 def changegroupinfo(self, nodes, source):
1987 1994 if self.ui.verbose or source == 'bundle':
1988 1995 self.ui.status(_("%d changesets found\n") % len(nodes))
1989 1996 if self.ui.debugflag:
1990 1997 self.ui.debug("list of changesets:\n")
1991 1998 for node in nodes:
1992 1999 self.ui.debug("%s\n" % hex(node))
1993 2000
1994 2001 def changegroupsubset(self, bases, heads, source):
1995 2002 """Compute a changegroup consisting of all the nodes that are
1996 2003 descendants of any of the bases and ancestors of any of the heads.
1997 2004 Return a chunkbuffer object whose read() method will return
1998 2005 successive changegroup chunks.
1999 2006
2000 2007 It is fairly complex as determining which filenodes and which
2001 2008 manifest nodes need to be included for the changeset to be complete
2002 2009 is non-trivial.
2003 2010
2004 2011 Another wrinkle is doing the reverse, figuring out which changeset in
2005 2012 the changegroup a particular filenode or manifestnode belongs to.
2006 2013 """
2007 2014 cl = self.changelog
2008 2015 if not bases:
2009 2016 bases = [nullid]
2010 2017 csets, bases, heads = cl.nodesbetween(bases, heads)
2011 2018 # We assume that all ancestors of bases are known
2012 2019 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2013 2020 return self._changegroupsubset(common, csets, heads, source)
2014 2021
2015 2022 def getlocalbundle(self, source, outgoing):
2016 2023 """Like getbundle, but taking a discovery.outgoing as an argument.
2017 2024
2018 2025 This is only implemented for local repos and reuses potentially
2019 2026 precomputed sets in outgoing."""
2020 2027 if not outgoing.missing:
2021 2028 return None
2022 2029 return self._changegroupsubset(outgoing.common,
2023 2030 outgoing.missing,
2024 2031 outgoing.missingheads,
2025 2032 source)
2026 2033
2027 2034 def getbundle(self, source, heads=None, common=None):
2028 2035 """Like changegroupsubset, but returns the set difference between the
2029 2036 ancestors of heads and the ancestors common.
2030 2037
2031 2038 If heads is None, use the local heads. If common is None, use [nullid].
2032 2039
2033 2040 The nodes in common might not all be known locally due to the way the
2034 2041 current discovery protocol works.
2035 2042 """
2036 2043 cl = self.changelog
2037 2044 if common:
2038 2045 nm = cl.nodemap
2039 2046 common = [n for n in common if n in nm]
2040 2047 else:
2041 2048 common = [nullid]
2042 2049 if not heads:
2043 2050 heads = cl.heads()
2044 2051 return self.getlocalbundle(source,
2045 2052 discovery.outgoing(cl, common, heads))
2046 2053
2047 2054 def _changegroupsubset(self, commonrevs, csets, heads, source):
2048 2055
2049 2056 cl = self.changelog
2050 2057 mf = self.manifest
2051 2058 mfs = {} # needed manifests
2052 2059 fnodes = {} # needed file nodes
2053 2060 changedfiles = set()
2054 2061 fstate = ['', {}]
2055 2062 count = [0, 0]
2056 2063
2057 2064 # can we go through the fast path ?
2058 2065 heads.sort()
2059 2066 if heads == sorted(self.heads()):
2060 2067 return self._changegroup(csets, source)
2061 2068
2062 2069 # slow path
2063 2070 self.hook('preoutgoing', throw=True, source=source)
2064 2071 self.changegroupinfo(csets, source)
2065 2072
2066 2073 # filter any nodes that claim to be part of the known set
2067 2074 def prune(revlog, missing):
2068 2075 rr, rl = revlog.rev, revlog.linkrev
2069 2076 return [n for n in missing
2070 2077 if rl(rr(n)) not in commonrevs]
2071 2078
2072 2079 progress = self.ui.progress
2073 2080 _bundling = _('bundling')
2074 2081 _changesets = _('changesets')
2075 2082 _manifests = _('manifests')
2076 2083 _files = _('files')
2077 2084
2078 2085 def lookup(revlog, x):
2079 2086 if revlog == cl:
2080 2087 c = cl.read(x)
2081 2088 changedfiles.update(c[3])
2082 2089 mfs.setdefault(c[0], x)
2083 2090 count[0] += 1
2084 2091 progress(_bundling, count[0],
2085 2092 unit=_changesets, total=count[1])
2086 2093 return x
2087 2094 elif revlog == mf:
2088 2095 clnode = mfs[x]
2089 2096 mdata = mf.readfast(x)
2090 2097 for f, n in mdata.iteritems():
2091 2098 if f in changedfiles:
2092 2099 fnodes[f].setdefault(n, clnode)
2093 2100 count[0] += 1
2094 2101 progress(_bundling, count[0],
2095 2102 unit=_manifests, total=count[1])
2096 2103 return clnode
2097 2104 else:
2098 2105 progress(_bundling, count[0], item=fstate[0],
2099 2106 unit=_files, total=count[1])
2100 2107 return fstate[1][x]
2101 2108
2102 2109 bundler = changegroup.bundle10(lookup)
2103 2110 reorder = self.ui.config('bundle', 'reorder', 'auto')
2104 2111 if reorder == 'auto':
2105 2112 reorder = None
2106 2113 else:
2107 2114 reorder = util.parsebool(reorder)
2108 2115
2109 2116 def gengroup():
2110 2117 # Create a changenode group generator that will call our functions
2111 2118 # back to lookup the owning changenode and collect information.
2112 2119 count[:] = [0, len(csets)]
2113 2120 for chunk in cl.group(csets, bundler, reorder=reorder):
2114 2121 yield chunk
2115 2122 progress(_bundling, None)
2116 2123
2117 2124 # Create a generator for the manifestnodes that calls our lookup
2118 2125 # and data collection functions back.
2119 2126 for f in changedfiles:
2120 2127 fnodes[f] = {}
2121 2128 count[:] = [0, len(mfs)]
2122 2129 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2123 2130 yield chunk
2124 2131 progress(_bundling, None)
2125 2132
2126 2133 mfs.clear()
2127 2134
2128 2135 # Go through all our files in order sorted by name.
2129 2136 count[:] = [0, len(changedfiles)]
2130 2137 for fname in sorted(changedfiles):
2131 2138 filerevlog = self.file(fname)
2132 2139 if not len(filerevlog):
2133 2140 raise util.Abort(_("empty or missing revlog for %s")
2134 2141 % fname)
2135 2142 fstate[0] = fname
2136 2143 fstate[1] = fnodes.pop(fname, {})
2137 2144
2138 2145 nodelist = prune(filerevlog, fstate[1])
2139 2146 if nodelist:
2140 2147 count[0] += 1
2141 2148 yield bundler.fileheader(fname)
2142 2149 for chunk in filerevlog.group(nodelist, bundler, reorder):
2143 2150 yield chunk
2144 2151
2145 2152 # Signal that no more groups are left.
2146 2153 yield bundler.close()
2147 2154 progress(_bundling, None)
2148 2155
2149 2156 if csets:
2150 2157 self.hook('outgoing', node=hex(csets[0]), source=source)
2151 2158
2152 2159 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2153 2160
2154 2161 def changegroup(self, basenodes, source):
2155 2162 # to avoid a race we use changegroupsubset() (issue1320)
2156 2163 return self.changegroupsubset(basenodes, self.heads(), source)
2157 2164
2158 2165 def _changegroup(self, nodes, source):
2159 2166 """Compute the changegroup of all nodes that we have that a recipient
2160 2167 doesn't. Return a chunkbuffer object whose read() method will return
2161 2168 successive changegroup chunks.
2162 2169
2163 2170 This is much easier than the previous function as we can assume that
2164 2171 the recipient has any changenode we aren't sending them.
2165 2172
2166 2173 nodes is the set of nodes to send"""
2167 2174
2168 2175 cl = self.changelog
2169 2176 mf = self.manifest
2170 2177 mfs = {}
2171 2178 changedfiles = set()
2172 2179 fstate = ['']
2173 2180 count = [0, 0]
2174 2181
2175 2182 self.hook('preoutgoing', throw=True, source=source)
2176 2183 self.changegroupinfo(nodes, source)
2177 2184
2178 2185 revset = set([cl.rev(n) for n in nodes])
2179 2186
2180 2187 def gennodelst(log):
2181 2188 ln, llr = log.node, log.linkrev
2182 2189 return [ln(r) for r in log if llr(r) in revset]
2183 2190
2184 2191 progress = self.ui.progress
2185 2192 _bundling = _('bundling')
2186 2193 _changesets = _('changesets')
2187 2194 _manifests = _('manifests')
2188 2195 _files = _('files')
2189 2196
2190 2197 def lookup(revlog, x):
2191 2198 if revlog == cl:
2192 2199 c = cl.read(x)
2193 2200 changedfiles.update(c[3])
2194 2201 mfs.setdefault(c[0], x)
2195 2202 count[0] += 1
2196 2203 progress(_bundling, count[0],
2197 2204 unit=_changesets, total=count[1])
2198 2205 return x
2199 2206 elif revlog == mf:
2200 2207 count[0] += 1
2201 2208 progress(_bundling, count[0],
2202 2209 unit=_manifests, total=count[1])
2203 2210 return cl.node(revlog.linkrev(revlog.rev(x)))
2204 2211 else:
2205 2212 progress(_bundling, count[0], item=fstate[0],
2206 2213 total=count[1], unit=_files)
2207 2214 return cl.node(revlog.linkrev(revlog.rev(x)))
2208 2215
2209 2216 bundler = changegroup.bundle10(lookup)
2210 2217 reorder = self.ui.config('bundle', 'reorder', 'auto')
2211 2218 if reorder == 'auto':
2212 2219 reorder = None
2213 2220 else:
2214 2221 reorder = util.parsebool(reorder)
2215 2222
2216 2223 def gengroup():
2217 2224 '''yield a sequence of changegroup chunks (strings)'''
2218 2225 # construct a list of all changed files
2219 2226
2220 2227 count[:] = [0, len(nodes)]
2221 2228 for chunk in cl.group(nodes, bundler, reorder=reorder):
2222 2229 yield chunk
2223 2230 progress(_bundling, None)
2224 2231
2225 2232 count[:] = [0, len(mfs)]
2226 2233 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2227 2234 yield chunk
2228 2235 progress(_bundling, None)
2229 2236
2230 2237 count[:] = [0, len(changedfiles)]
2231 2238 for fname in sorted(changedfiles):
2232 2239 filerevlog = self.file(fname)
2233 2240 if not len(filerevlog):
2234 2241 raise util.Abort(_("empty or missing revlog for %s")
2235 2242 % fname)
2236 2243 fstate[0] = fname
2237 2244 nodelist = gennodelst(filerevlog)
2238 2245 if nodelist:
2239 2246 count[0] += 1
2240 2247 yield bundler.fileheader(fname)
2241 2248 for chunk in filerevlog.group(nodelist, bundler, reorder):
2242 2249 yield chunk
2243 2250 yield bundler.close()
2244 2251 progress(_bundling, None)
2245 2252
2246 2253 if nodes:
2247 2254 self.hook('outgoing', node=hex(nodes[0]), source=source)
2248 2255
2249 2256 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2250 2257
2251 2258 def addchangegroup(self, source, srctype, url, emptyok=False):
2252 2259 """Add the changegroup returned by source.read() to this repo.
2253 2260 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2254 2261 the URL of the repo where this changegroup is coming from.
2255 2262
2256 2263 Return an integer summarizing the change to this repo:
2257 2264 - nothing changed or no source: 0
2258 2265 - more heads than before: 1+added heads (2..n)
2259 2266 - fewer heads than before: -1-removed heads (-2..-n)
2260 2267 - number of heads stays the same: 1
2261 2268 """
2262 2269 def csmap(x):
2263 2270 self.ui.debug("add changeset %s\n" % short(x))
2264 2271 return len(cl)
2265 2272
2266 2273 def revmap(x):
2267 2274 return cl.rev(x)
2268 2275
2269 2276 if not source:
2270 2277 return 0
2271 2278
2272 2279 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2273 2280
2274 2281 changesets = files = revisions = 0
2275 2282 efiles = set()
2276 2283
2277 2284 # write changelog data to temp files so concurrent readers will not see
2278 2285 # inconsistent view
2279 2286 cl = self.changelog
2280 2287 cl.delayupdate()
2281 2288 oldheads = cl.heads()
2282 2289
2283 2290 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2284 2291 try:
2285 2292 trp = weakref.proxy(tr)
2286 2293 # pull off the changeset group
2287 2294 self.ui.status(_("adding changesets\n"))
2288 2295 clstart = len(cl)
2289 2296 class prog(object):
2290 2297 step = _('changesets')
2291 2298 count = 1
2292 2299 ui = self.ui
2293 2300 total = None
2294 2301 def __call__(self):
2295 2302 self.ui.progress(self.step, self.count, unit=_('chunks'),
2296 2303 total=self.total)
2297 2304 self.count += 1
2298 2305 pr = prog()
2299 2306 source.callback = pr
2300 2307
2301 2308 source.changelogheader()
2302 2309 srccontent = cl.addgroup(source, csmap, trp)
2303 2310 if not (srccontent or emptyok):
2304 2311 raise util.Abort(_("received changelog group is empty"))
2305 2312 clend = len(cl)
2306 2313 changesets = clend - clstart
2307 2314 for c in xrange(clstart, clend):
2308 2315 efiles.update(self[c].files())
2309 2316 efiles = len(efiles)
2310 2317 self.ui.progress(_('changesets'), None)
2311 2318
2312 2319 # pull off the manifest group
2313 2320 self.ui.status(_("adding manifests\n"))
2314 2321 pr.step = _('manifests')
2315 2322 pr.count = 1
2316 2323 pr.total = changesets # manifests <= changesets
2317 2324 # no need to check for empty manifest group here:
2318 2325 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2319 2326 # no new manifest will be created and the manifest group will
2320 2327 # be empty during the pull
2321 2328 source.manifestheader()
2322 2329 self.manifest.addgroup(source, revmap, trp)
2323 2330 self.ui.progress(_('manifests'), None)
2324 2331
2325 2332 needfiles = {}
2326 2333 if self.ui.configbool('server', 'validate', default=False):
2327 2334 # validate incoming csets have their manifests
2328 2335 for cset in xrange(clstart, clend):
2329 2336 mfest = self.changelog.read(self.changelog.node(cset))[0]
2330 2337 mfest = self.manifest.readdelta(mfest)
2331 2338 # store file nodes we must see
2332 2339 for f, n in mfest.iteritems():
2333 2340 needfiles.setdefault(f, set()).add(n)
2334 2341
2335 2342 # process the files
2336 2343 self.ui.status(_("adding file changes\n"))
2337 2344 pr.step = _('files')
2338 2345 pr.count = 1
2339 2346 pr.total = efiles
2340 2347 source.callback = None
2341 2348
2342 2349 while True:
2343 2350 chunkdata = source.filelogheader()
2344 2351 if not chunkdata:
2345 2352 break
2346 2353 f = chunkdata["filename"]
2347 2354 self.ui.debug("adding %s revisions\n" % f)
2348 2355 pr()
2349 2356 fl = self.file(f)
2350 2357 o = len(fl)
2351 2358 if not fl.addgroup(source, revmap, trp):
2352 2359 raise util.Abort(_("received file revlog group is empty"))
2353 2360 revisions += len(fl) - o
2354 2361 files += 1
2355 2362 if f in needfiles:
2356 2363 needs = needfiles[f]
2357 2364 for new in xrange(o, len(fl)):
2358 2365 n = fl.node(new)
2359 2366 if n in needs:
2360 2367 needs.remove(n)
2361 2368 if not needs:
2362 2369 del needfiles[f]
2363 2370 self.ui.progress(_('files'), None)
2364 2371
2365 2372 for f, needs in needfiles.iteritems():
2366 2373 fl = self.file(f)
2367 2374 for n in needs:
2368 2375 try:
2369 2376 fl.rev(n)
2370 2377 except error.LookupError:
2371 2378 raise util.Abort(
2372 2379 _('missing file data for %s:%s - run hg verify') %
2373 2380 (f, hex(n)))
2374 2381
2375 2382 dh = 0
2376 2383 if oldheads:
2377 2384 heads = cl.heads()
2378 2385 dh = len(heads) - len(oldheads)
2379 2386 for h in heads:
2380 2387 if h not in oldheads and self[h].closesbranch():
2381 2388 dh -= 1
2382 2389 htext = ""
2383 2390 if dh:
2384 2391 htext = _(" (%+d heads)") % dh
2385 2392
2386 2393 self.ui.status(_("added %d changesets"
2387 2394 " with %d changes to %d files%s\n")
2388 2395 % (changesets, revisions, files, htext))
2389 2396
2390 2397 if changesets > 0:
2391 2398 p = lambda: cl.writepending() and self.root or ""
2392 2399 self.hook('pretxnchangegroup', throw=True,
2393 2400 node=hex(cl.node(clstart)), source=srctype,
2394 2401 url=url, pending=p)
2395 2402
2396 2403 added = [cl.node(r) for r in xrange(clstart, clend)]
2397 2404 publishing = self.ui.configbool('phases', 'publish', True)
2398 2405 if srctype == 'push':
2399 2406 # Old servers can not push the boundary themselves.
2400 2407 # New servers won't push the boundary if the changeset already
2401 2408 # existed locally as secret
2402 2409 #
2403 2410 # We should not use added here but the list of all changes in
2404 2411 # the bundle
2405 2412 if publishing:
2406 2413 phases.advanceboundary(self, phases.public, srccontent)
2407 2414 else:
2408 2415 phases.advanceboundary(self, phases.draft, srccontent)
2409 2416 phases.retractboundary(self, phases.draft, added)
2410 2417 elif srctype != 'strip':
2411 2418 # publishing only alters behavior during push
2412 2419 #
2413 2420 # strip should not touch boundary at all
2414 2421 phases.retractboundary(self, phases.draft, added)
2415 2422
2416 2423 # make changelog see real files again
2417 2424 cl.finalize(trp)
2418 2425
2419 2426 tr.close()
2420 2427
2421 2428 if changesets > 0:
2422 2429 def runhooks():
2423 2430 # forcefully update the on-disk branch cache
2424 2431 self.ui.debug("updating the branch cache\n")
2425 2432 self.updatebranchcache()
2426 2433 self.hook("changegroup", node=hex(cl.node(clstart)),
2427 2434 source=srctype, url=url)
2428 2435
2429 2436 for n in added:
2430 2437 self.hook("incoming", node=hex(n), source=srctype,
2431 2438 url=url)
2432 2439 self._afterlock(runhooks)
2433 2440
2434 2441 finally:
2435 2442 tr.release()
2436 2443 # never return 0 here:
2437 2444 if dh < 0:
2438 2445 return dh - 1
2439 2446 else:
2440 2447 return dh + 1
2441 2448
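To make the return convention concrete, the final dh adjustment above maps the head-count delta onto the values documented in addchangegroup()'s docstring (never 0, so callers can tell "nothing happened" apart from a successful no-op on heads). A small standalone sketch of that mapping; the helper name _returncode is hypothetical:

def _returncode(dh):
    # dh = len(new heads) - len(old heads), with newly closed heads discounted
    return dh - 1 if dh < 0 else dh + 1

assert _returncode(0) == 1     # head count unchanged
assert _returncode(2) == 3     # two heads added  -> 1 + added heads
assert _returncode(-1) == -2   # one head removed -> -1 - removed heads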
2442 2449 def stream_in(self, remote, requirements):
2443 2450 lock = self.lock()
2444 2451 try:
2445 2452 fp = remote.stream_out()
2446 2453 l = fp.readline()
2447 2454 try:
2448 2455 resp = int(l)
2449 2456 except ValueError:
2450 2457 raise error.ResponseError(
2451 2458 _('unexpected response from remote server:'), l)
2452 2459 if resp == 1:
2453 2460 raise util.Abort(_('operation forbidden by server'))
2454 2461 elif resp == 2:
2455 2462 raise util.Abort(_('locking the remote repository failed'))
2456 2463 elif resp != 0:
2457 2464 raise util.Abort(_('the server sent an unknown error code'))
2458 2465 self.ui.status(_('streaming all changes\n'))
2459 2466 l = fp.readline()
2460 2467 try:
2461 2468 total_files, total_bytes = map(int, l.split(' ', 1))
2462 2469 except (ValueError, TypeError):
2463 2470 raise error.ResponseError(
2464 2471 _('unexpected response from remote server:'), l)
2465 2472 self.ui.status(_('%d files to transfer, %s of data\n') %
2466 2473 (total_files, util.bytecount(total_bytes)))
2467 2474 handled_bytes = 0
2468 2475 self.ui.progress(_('clone'), 0, total=total_bytes)
2469 2476 start = time.time()
2470 2477 for i in xrange(total_files):
2471 2478 # XXX doesn't support '\n' or '\r' in filenames
2472 2479 l = fp.readline()
2473 2480 try:
2474 2481 name, size = l.split('\0', 1)
2475 2482 size = int(size)
2476 2483 except (ValueError, TypeError):
2477 2484 raise error.ResponseError(
2478 2485 _('unexpected response from remote server:'), l)
2479 2486 if self.ui.debugflag:
2480 2487 self.ui.debug('adding %s (%s)\n' %
2481 2488 (name, util.bytecount(size)))
2482 2489 # for backwards compat, name was partially encoded
2483 2490 ofp = self.sopener(store.decodedir(name), 'w')
2484 2491 for chunk in util.filechunkiter(fp, limit=size):
2485 2492 handled_bytes += len(chunk)
2486 2493 self.ui.progress(_('clone'), handled_bytes,
2487 2494 total=total_bytes)
2488 2495 ofp.write(chunk)
2489 2496 ofp.close()
2490 2497 elapsed = time.time() - start
2491 2498 if elapsed <= 0:
2492 2499 elapsed = 0.001
2493 2500 self.ui.progress(_('clone'), None)
2494 2501 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2495 2502 (util.bytecount(total_bytes), elapsed,
2496 2503 util.bytecount(total_bytes / elapsed)))
2497 2504
2498 2505 # new requirements = old non-format requirements +
2499 2506 # new format-related
2500 2507 # requirements from the streamed-in repository
2501 2508 requirements.update(set(self.requirements) - self.supportedformats)
2502 2509 self._applyrequirements(requirements)
2503 2510 self._writerequirements()
2504 2511
2505 2512 self.invalidate()
2506 2513 return len(self.heads()) + 1
2507 2514 finally:
2508 2515 lock.release()
2509 2516
2510 2517 def clone(self, remote, heads=[], stream=False):
2511 2518 '''clone remote repository.
2512 2519
2513 2520 keyword arguments:
2514 2521 heads: list of revs to clone (forces use of pull)
2515 2522 stream: use streaming clone if possible'''
2516 2523
2517 2524 # now, all clients that can request uncompressed clones can
2518 2525 # read repo formats supported by all servers that can serve
2519 2526 # them.
2520 2527
2521 2528 # if revlog format changes, client will have to check version
2522 2529 # and format flags on "stream" capability, and use
2523 2530 # uncompressed only if compatible.
2524 2531
2525 2532 if not stream:
2526 2533 # if the server explicitly prefers to stream (for fast LANs)
2527 2534 stream = remote.capable('stream-preferred')
2528 2535
2529 2536 if stream and not heads:
2530 2537 # 'stream' means remote revlog format is revlogv1 only
2531 2538 if remote.capable('stream'):
2532 2539 return self.stream_in(remote, set(('revlogv1',)))
2533 2540 # otherwise, 'streamreqs' contains the remote revlog format
2534 2541 streamreqs = remote.capable('streamreqs')
2535 2542 if streamreqs:
2536 2543 streamreqs = set(streamreqs.split(','))
2537 2544 # if we support it, stream in and adjust our requirements
2538 2545 if not streamreqs - self.supportedformats:
2539 2546 return self.stream_in(remote, streamreqs)
2540 2547 return self.pull(remote, heads)
2541 2548
2542 2549 def pushkey(self, namespace, key, old, new):
2543 2550 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2544 2551 old=old, new=new)
2545 2552 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2546 2553 ret = pushkey.push(self, namespace, key, old, new)
2547 2554 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2548 2555 ret=ret)
2549 2556 return ret
2550 2557
2551 2558 def listkeys(self, namespace):
2552 2559 self.hook('prelistkeys', throw=True, namespace=namespace)
2553 2560 self.ui.debug('listing keys for "%s"\n' % namespace)
2554 2561 values = pushkey.list(self, namespace)
2555 2562 self.hook('listkeys', namespace=namespace, values=values)
2556 2563 return values
2557 2564
2558 2565 def debugwireargs(self, one, two, three=None, four=None, five=None):
2559 2566 '''used to test argument passing over the wire'''
2560 2567 return "%s %s %s %s %s" % (one, two, three, four, five)
2561 2568
2562 2569 def savecommitmessage(self, text):
2563 2570 fp = self.opener('last-message.txt', 'wb')
2564 2571 try:
2565 2572 fp.write(text)
2566 2573 finally:
2567 2574 fp.close()
2568 2575 return self.pathto(fp.name[len(self.root)+1:])
2569 2576
2570 2577 # used to avoid circular references so destructors work
2571 2578 def aftertrans(files):
2572 2579 renamefiles = [tuple(t) for t in files]
2573 2580 def a():
2574 2581 for src, dest in renamefiles:
2575 2582 try:
2576 2583 util.rename(src, dest)
2577 2584 except OSError: # journal file does not yet exist
2578 2585 pass
2579 2586 return a
2580 2587
2581 2588 def undoname(fn):
2582 2589 base, name = os.path.split(fn)
2583 2590 assert name.startswith('journal')
2584 2591 return os.path.join(base, name.replace('journal', 'undo', 1))
2585 2592
2586 2593 def instance(ui, path, create):
2587 2594 return localrepository(ui, util.urllocalpath(path), create)
2588 2595
2589 2596 def islocal(path):
2590 2597 return True
@@ -1,301 +1,322
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23
24 24 Format
25 25 ------
26 26
27 27 Markers are stored in an append-only file stored in
28 28 '.hg/store/obsstore'.
29 29
30 30 The file starts with a version header:
31 31
32 32 - 1 unsigned byte: version number, starting at zero.
33 33
34 34
35 35 The header is followed by the markers. Each marker is made of:
36 36
37 37 - 1 unsigned byte: number of new changesets "N", could be zero.
38 38
39 39 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
40 40
41 41 - 1 byte: a bit field. It is reserved for flags used in common
42 42 obsolete marker operations, to avoid repeated decoding of metadata
43 43 entries.
44 44
45 45 - 20 bytes: obsoleted changeset identifier.
46 46
47 47 - N*20 bytes: new changeset identifiers.
48 48
49 49 - M bytes: metadata as a sequence of nul-terminated strings. Each
50 50 string contains a key and a value, separated by a color ':', without
51 51 additional encoding. Keys cannot contain '\0' or ':' and values
52 52 cannot contain '\0'.
53 53 """
54 54 import struct
55 55 from mercurial import util, base85
56 56 from i18n import _
57 57
58 58 _pack = struct.pack
59 59 _unpack = struct.unpack
60 60
61 61
62 62
63 63 # data used for parsing and writing
64 64 _fmversion = 0
65 65 _fmfixed = '>BIB20s'
66 66 _fmnode = '20s'
67 67 _fmfsize = struct.calcsize(_fmfixed)
68 68 _fnodesize = struct.calcsize(_fmnode)
69 69
70 70 def _readmarkers(data):
71 71 """Read and enumerate markers from raw data"""
72 72 off = 0
73 73 diskversion = _unpack('>B', data[off:off + 1])[0]
74 74 off += 1
75 75 if diskversion != _fmversion:
76 76 raise util.Abort(_('parsing obsolete marker: unknown version %r')
77 77 % diskversion)
78 78
79 79 # Loop on markers
80 80 l = len(data)
81 81 while off + _fmfsize <= l:
82 82 # read fixed part
83 83 cur = data[off:off + _fmfsize]
84 84 off += _fmfsize
85 85 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
86 86 # read replacement
87 87 sucs = ()
88 88 if nbsuc:
89 89 s = (_fnodesize * nbsuc)
90 90 cur = data[off:off + s]
91 91 sucs = _unpack(_fmnode * nbsuc, cur)
92 92 off += s
93 93 # read metadata
94 94 # (metadata will be decoded on demand)
95 95 metadata = data[off:off + mdsize]
96 96 if len(metadata) != mdsize:
97 97 raise util.Abort(_('parsing obsolete marker: metadata is too '
98 98 'short, %d bytes expected, got %d')
99 99 % (mdsize, len(metadata)))
100 100 off += mdsize
101 101 yield (pre, sucs, flags, metadata)
102 102
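As a concrete check of the layout described in the module docstring and parsed by _readmarkers above, the following snippet (assumed to run in this module's namespace, with made-up 20-byte node values) hand-packs one version-0 marker with a single successor and reads it back:

prec = '\1' * 20                     # obsoleted changeset id (placeholder)
succ = '\2' * 20                     # one successor id (placeholder)
meta = 'date:0 0\0user:test'
raw = _pack('>B', _fmversion)                  # version header
raw += _pack(_fmfixed, 1, len(meta), 0, prec)  # 1 successor, no flags
raw += _pack(_fmnode, succ) + meta
assert list(_readmarkers(raw)) == [(prec, (succ,), 0, meta)]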
103 103 def encodemeta(meta):
104 104 """Return encoded metadata string to string mapping.
105 105
106 106 Assume no ':' in keys and no '\0' in either keys or values."""
107 107 for key, value in meta.iteritems():
108 108 if ':' in key or '\0' in key:
109 109 raise ValueError("':' and '\0' are forbidden in metadata keys")
110 110 if '\0' in value:
111 111 raise ValueError("'\0' is forbidden in metadata values")
112 112 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
113 113
114 114 def decodemeta(data):
115 115 """Return string to string dictionary from encoded version."""
116 116 d = {}
117 117 for l in data.split('\0'):
118 118 if l:
119 119 key, value = l.split(':')
120 120 d[key] = value
121 121 return d
122 122
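encodemeta and decodemeta implement a flat '\0'-separated 'key:value' encoding, so a metadata dictionary survives a round trip as long as keys contain neither ':' nor '\0' and values contain no '\0' (and, for decoding, no ':' either). A quick illustrative round trip with made-up values:

meta = {'date': '1342512736 -7200', 'user': 'test'}
blob = encodemeta(meta)    # 'date:1342512736 -7200\0user:test'
assert decodemeta(blob) == meta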
123 123 class marker(object):
124 124 """Wrap obsolete marker raw data"""
125 125
126 126 def __init__(self, repo, data):
127 127 # the repo argument will be used to create changectx in later version
128 128 self._repo = repo
129 129 self._data = data
130 130 self._decodedmeta = None
131 131
132 132 def precnode(self):
133 133 """Precursor changeset node identifier"""
134 134 return self._data[0]
135 135
136 136 def succnodes(self):
137 137 """List of successor changesets node identifiers"""
138 138 return self._data[1]
139 139
140 140 def metadata(self):
141 141 """Decoded metadata dictionary"""
142 142 if self._decodedmeta is None:
143 143 self._decodedmeta = decodemeta(self._data[3])
144 144 return self._decodedmeta
145 145
146 146 def date(self):
147 147 """Creation date as (unixtime, offset)"""
148 148 parts = self.metadata()['date'].split(' ')
149 149 return (float(parts[0]), int(parts[1]))
150 150
151 151 class obsstore(object):
152 152 """Store obsolete markers
153 153
154 154 Markers can be accessed with two mappings:
155 155 - precursors: precursor node -> set(markers it appears in as precursor)
156 156 - successors: successor node -> set(markers it appears in as successor)
157 157 """
158 158
159 159 def __init__(self, sopener):
160 160 self._all = []
161 161 # new markers to serialize
162 162 self.precursors = {}
163 163 self.successors = {}
164 164 self.sopener = sopener
165 165 data = sopener.tryread('obsstore')
166 166 if data:
167 167 self._load(_readmarkers(data))
168 168
169 169 def __iter__(self):
170 170 return iter(self._all)
171 171
172 172 def __nonzero__(self):
173 173 return bool(self._all)
174 174
175 175 def create(self, transaction, prec, succs=(), flag=0, metadata=None):
176 176 """obsolete: add a new obsolete marker
177 177
178 178 * ensuring it is hashable
179 179 * check mandatory metadata
180 180 * encode metadata
181 181 """
182 182 if metadata is None:
183 183 metadata = {}
184 184 if len(prec) != 20:
185 185 raise ValueError(prec)
186 186 for succ in succs:
187 187 if len(succ) != 20:
188 188 raise ValueError(succ)
189 189 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
190 190 self.add(transaction, [marker])
191 191
192 192 def add(self, transaction, markers):
193 193 """Add new markers to the store
194 194
195 195 Take care of filtering duplicates.
196 196 Return the number of new markers."""
197 197 new = [m for m in markers if m not in self._all]
198 198 if new:
199 199 f = self.sopener('obsstore', 'ab')
200 200 try:
201 201 # Whether the file's current position is at the beginning or at
202 202 # the end after opening a file for appending is implementation
203 203 # defined. So we must seek to the end before calling tell(),
204 204 # or we may get a zero offset for non-zero sized files on
205 205 # some platforms (issue3543).
206 206 f.seek(0, 2) # os.SEEK_END
207 207 offset = f.tell()
208 208 transaction.add('obsstore', offset)
209 209 # offset == 0: new file - add the version header
210 210 for bytes in _encodemarkers(new, offset == 0):
211 211 f.write(bytes)
212 212 finally:
213 213 # XXX: f.close() == filecache invalidation == obsstore rebuilt.
214 214 # call 'filecacheentry.refresh()' here
215 215 f.close()
216 216 self._load(new)
217 217 return len(new)
218 218
219 219 def mergemarkers(self, transaction, data):
220 220 markers = _readmarkers(data)
221 221 self.add(transaction, markers)
222 222
223 223 def _load(self, markers):
224 224 for mark in markers:
225 225 self._all.append(mark)
226 226 pre, sucs = mark[:2]
227 227 self.precursors.setdefault(pre, set()).add(mark)
228 228 for suc in sucs:
229 229 self.successors.setdefault(suc, set()).add(mark)
230 230
231 231 def _encodemarkers(markers, addheader=False):
232 232 # Kept separate from flushmarkers(), it will be reused for
233 233 # marker exchange.
234 234 if addheader:
235 235 yield _pack('>B', _fmversion)
236 236 for marker in markers:
237 pre, sucs, flags, metadata = marker
238 nbsuc = len(sucs)
239 format = _fmfixed + (_fmnode * nbsuc)
240 data = [nbsuc, len(metadata), flags, pre]
241 data.extend(sucs)
242 yield _pack(format, *data)
243 yield metadata
237 yield _encodeonemarker(marker)
238
239
240 def _encodeonemarker(marker):
241 pre, sucs, flags, metadata = marker
242 nbsuc = len(sucs)
243 format = _fmfixed + (_fmnode * nbsuc)
244 data = [nbsuc, len(metadata), flags, pre]
245 data.extend(sucs)
246 return _pack(format, *data) + metadata
247
248 # arbitrarily picked to fit into the 8K limit from the HTTP server
249 # you have to take into account:
250 # - the version header
251 # - the base85 encoding
252 _maxpayload = 5300
244 253
245 254 def listmarkers(repo):
246 255 """List markers over pushkey"""
247 256 if not repo.obsstore:
248 257 return {}
249 markers = _encodemarkers(repo.obsstore, True)
250 return {'dump': base85.b85encode(''.join(markers))}
258 keys = {}
259 parts = []
260 currentlen = _maxpayload * 2 # ensure we create a new part
261 for marker in repo.obsstore:
262 nextdata = _encodeonemarker(marker)
263 if (len(nextdata) + currentlen > _maxpayload):
264 currentpart = []
265 currentlen = 0
266 parts.append(currentpart)
267 currentpart.append(nextdata)
268 for idx, part in enumerate(reversed(parts)):
269 data = ''.join([_pack('>B', _fmversion)] + part)
270 keys['dump%i' % idx] = base85.b85encode(data)
271 return keys
251 272
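The part-splitting in listmarkers() above exists because each pushkey value has to fit under the roughly 8K HTTP server limit noted next to _maxpayload: base85 turns 4 bytes of binary marker data into 5 characters, so a 5300-byte budget (plus the one-byte version header) comes out a little over 6,600 encoded characters, comfortably under 8K. A compact sketch of the same chunking idea on arbitrary already-encoded items, with the hypothetical name chunkitems; note that the running length is accumulated after every append so that the size cap actually takes effect:

def chunkitems(items, maxpayload=5300):
    # group encoded items into parts whose total size stays under
    # maxpayload; each part later becomes one 'dumpN' pushkey value
    parts = []
    current, currentlen = None, maxpayload * 2   # force creation of a first part
    for item in items:
        if currentlen + len(item) > maxpayload:
            current, currentlen = [], 0
            parts.append(current)
        current.append(item)
        currentlen += len(item)
    return parts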
252 273 def pushmarker(repo, key, old, new):
253 274 """Push markers over pushkey"""
254 if key != 'dump':
275 if not key.startswith('dump'):
255 276 repo.ui.warn(_('unknown key: %r') % key)
256 277 return 0
257 278 if old:
258 279 repo.ui.warn(_('unexpected old value for %r') % key)
259 280 return 0
260 281 data = base85.b85decode(new)
261 282 lock = repo.lock()
262 283 try:
263 284 tr = repo.transaction('pushkey: obsolete markers')
264 285 try:
265 286 repo.obsstore.mergemarkers(tr, data)
266 287 tr.close()
267 288 return 1
268 289 finally:
269 290 tr.release()
270 291 finally:
271 292 lock.release()
272 293
273 294 def allmarkers(repo):
274 295 """all obsolete markers known in a repository"""
275 296 for markerdata in repo.obsstore:
276 297 yield marker(repo, markerdata)
277 298
278 299 def precursormarkers(ctx):
279 300 """obsolete markers making this changeset obsolete"""
280 301 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
281 302 yield marker(ctx._repo, data)
282 303
283 304 def successormarkers(ctx):
284 305 """obsolete markers marking this changeset as a successor"""
285 306 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
286 307 yield marker(ctx._repo, data)
287 308
288 309 def anysuccessors(obsstore, node):
289 310 """Yield every successor of <node>
290 311
291 312 This is a linear yield, unsuitable for detecting split changesets."""
292 313 remaining = set([node])
293 314 seen = set(remaining)
294 315 while remaining:
295 316 current = remaining.pop()
296 317 yield current
297 318 for mark in obsstore.precursors.get(current, ()):
298 319 for suc in mark[1]:
299 320 if suc not in seen:
300 321 seen.add(suc)
301 322 remaining.add(suc)