branchmap: _updatebranchmap does not need to be filtered...
Pierre-Yves David
r18119:5264464b default
@@ -1,2680 +1,2679 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
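# Illustrative sketch: a hypothetical extension helper wrapped the same way.
#
#     @unfilteredmethod
#     def countallrevisions(repo):   # hypothetical helper, not a real API
#         # len() of the unfiltered repo counts every revision, including
#         # ones hidden by the active repoview filter.
#         return len(repo)
#
# The wrapper simply substitutes repo.unfiltered() before dispatching.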
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return discovery.visiblebranchmap(self._repo)
95 95
96 96 def heads(self):
97 97 return discovery.visibleheads(self._repo)
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
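# Illustrative sketch: callers normally reach this class through repo.peer()
# rather than constructing it directly, e.g.
#
#     peer = repo.peer()
#     heads = peer.heads()
#     cg = peer.getbundle('pull', heads=heads)   # 'pull' is an example source
#
# Each call above is answered by delegating to the wrapped localrepository.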
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150
151 151 def _baserequirements(self, create):
152 152 return self.requirements[:]
153 153
154 154 def __init__(self, baseui, path=None, create=False):
155 155 self.wvfs = scmutil.vfs(path, expand=True)
156 156 self.wopener = self.wvfs
157 157 self.root = self.wvfs.base
158 158 self.path = self.wvfs.join(".hg")
159 159 self.origroot = path
160 160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 161 self.vfs = scmutil.vfs(self.path)
162 162 self.opener = self.vfs
163 163 self.baseui = baseui
164 164 self.ui = baseui.copy()
165 165 # A list of callbacks to shape the phase if no data were found.
166 166 # Callbacks are in the form: func(repo, roots) --> processed root.
167 167 # This list is to be filled by extensions during repo setup.
168 168 self._phasedefaults = []
169 169 try:
170 170 self.ui.readconfig(self.join("hgrc"), self.root)
171 171 extensions.loadall(self.ui)
172 172 except IOError:
173 173 pass
174 174
175 175 if not self.vfs.isdir():
176 176 if create:
177 177 if not self.wvfs.exists():
178 178 self.wvfs.makedirs()
179 179 self.vfs.makedir(notindexed=True)
180 180 requirements = self._baserequirements(create)
181 181 if self.ui.configbool('format', 'usestore', True):
182 182 self.vfs.mkdir("store")
183 183 requirements.append("store")
184 184 if self.ui.configbool('format', 'usefncache', True):
185 185 requirements.append("fncache")
186 186 if self.ui.configbool('format', 'dotencode', True):
187 187 requirements.append('dotencode')
188 188 # create an invalid changelog
189 189 self.vfs.append(
190 190 "00changelog.i",
191 191 '\0\0\0\2' # represents revlogv2
192 192 ' dummy changelog to prevent using the old repo layout'
193 193 )
194 194 if self.ui.configbool('format', 'generaldelta', False):
195 195 requirements.append("generaldelta")
196 196 requirements = set(requirements)
197 197 else:
198 198 raise error.RepoError(_("repository %s not found") % path)
199 199 elif create:
200 200 raise error.RepoError(_("repository %s already exists") % path)
201 201 else:
202 202 try:
203 203 requirements = scmutil.readrequires(self.vfs, self.supported)
204 204 except IOError, inst:
205 205 if inst.errno != errno.ENOENT:
206 206 raise
207 207 requirements = set()
208 208
209 209 self.sharedpath = self.path
210 210 try:
211 211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 212 if not os.path.exists(s):
213 213 raise error.RepoError(
214 214 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 215 self.sharedpath = s
216 216 except IOError, inst:
217 217 if inst.errno != errno.ENOENT:
218 218 raise
219 219
220 220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 221 self.spath = self.store.path
222 222 self.svfs = self.store.vfs
223 223 self.sopener = self.svfs
224 224 self.sjoin = self.store.join
225 225 self.vfs.createmode = self.store.createmode
226 226 self._applyrequirements(requirements)
227 227 if create:
228 228 self._writerequirements()
229 229
230 230
231 231 self._branchcache = None
232 232 self._branchcachetip = None
233 233 self.filterpats = {}
234 234 self._datafilters = {}
235 235 self._transref = self._lockref = self._wlockref = None
236 236
237 237 # A cache for various files under .hg/ that tracks file changes,
238 238 # (used by the filecache decorator)
239 239 #
240 240 # Maps a property name to its util.filecacheentry
241 241 self._filecache = {}
242 242
243 243 # holds sets of revisions to be filtered
244 244 # should be cleared when something might have changed the filter value:
245 245 # - new changesets,
246 246 # - phase change,
247 247 # - new obsolescence marker,
248 248 # - working directory parent change,
249 249 # - bookmark changes
250 250 self.filteredrevcache = {}
251 251
252 252 def close(self):
253 253 pass
254 254
255 255 def _restrictcapabilities(self, caps):
256 256 return caps
257 257
258 258 def _applyrequirements(self, requirements):
259 259 self.requirements = requirements
260 260 self.sopener.options = dict((r, 1) for r in requirements
261 261 if r in self.openerreqs)
262 262
263 263 def _writerequirements(self):
264 264 reqfile = self.opener("requires", "w")
265 265 for r in self.requirements:
266 266 reqfile.write("%s\n" % r)
267 267 reqfile.close()
268 268
269 269 def _checknested(self, path):
270 270 """Determine if path is a legal nested repository."""
271 271 if not path.startswith(self.root):
272 272 return False
273 273 subpath = path[len(self.root) + 1:]
274 274 normsubpath = util.pconvert(subpath)
275 275
276 276 # XXX: Checking against the current working copy is wrong in
277 277 # the sense that it can reject things like
278 278 #
279 279 # $ hg cat -r 10 sub/x.txt
280 280 #
281 281 # if sub/ is no longer a subrepository in the working copy
282 282 # parent revision.
283 283 #
284 284 # However, it can of course also allow things that would have
285 285 # been rejected before, such as the above cat command if sub/
286 286 # is a subrepository now, but was a normal directory before.
287 287 # The old path auditor would have rejected by mistake since it
288 288 # panics when it sees sub/.hg/.
289 289 #
290 290 # All in all, checking against the working copy seems sensible
291 291 # since we want to prevent access to nested repositories on
292 292 # the filesystem *now*.
293 293 ctx = self[None]
294 294 parts = util.splitpath(subpath)
295 295 while parts:
296 296 prefix = '/'.join(parts)
297 297 if prefix in ctx.substate:
298 298 if prefix == normsubpath:
299 299 return True
300 300 else:
301 301 sub = ctx.sub(prefix)
302 302 return sub.checknested(subpath[len(prefix) + 1:])
303 303 else:
304 304 parts.pop()
305 305 return False
306 306
307 307 def peer(self):
308 308 return localpeer(self) # not cached to avoid reference cycle
309 309
310 310 def unfiltered(self):
311 311 """Return unfiltered version of the repository
312 312
313 313 Intended to be overwritten by filtered repos."""
314 314 return self
315 315
316 316 def filtered(self, name):
317 317 """Return a filtered version of a repository"""
318 318 # build a new class with the mixin and the current class
319 319 # (possibly a subclass of the repo)
320 320 class proxycls(repoview.repoview, self.unfiltered().__class__):
321 321 pass
322 322 return proxycls(self, name)
323 323
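# Illustrative sketch: callers ask for a view instead of mutating the repo,
# e.g. `repo.filtered('visible')` (filter name shown only as an example; the
# valid names are whatever the repoview module registers). The generated
# proxy class keeps the behaviour of the concrete repo subclass while
# layering the repoview mixin on top of it.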
324 324 @repofilecache('bookmarks')
325 325 def _bookmarks(self):
326 326 return bookmarks.bmstore(self)
327 327
328 328 @repofilecache('bookmarks.current')
329 329 def _bookmarkcurrent(self):
330 330 return bookmarks.readcurrent(self)
331 331
332 332 def bookmarkheads(self, bookmark):
333 333 name = bookmark.split('@', 1)[0]
334 334 heads = []
335 335 for mark, n in self._bookmarks.iteritems():
336 336 if mark.split('@', 1)[0] == name:
337 337 heads.append(n)
338 338 return heads
339 339
340 340 @storecache('phaseroots')
341 341 def _phasecache(self):
342 342 return phases.phasecache(self, self._phasedefaults)
343 343
344 344 @storecache('obsstore')
345 345 def obsstore(self):
346 346 store = obsolete.obsstore(self.sopener)
347 347 if store and not obsolete._enabled:
348 348 # message is rare enough to not be translated
349 349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 350 self.ui.warn(msg % len(list(store)))
351 351 return store
352 352
353 353 @unfilteredpropertycache
354 354 def hiddenrevs(self):
355 355 """hiddenrevs: revs that should be hidden by command and tools
356 356
357 357 This set is carried on the repo to ease initialization and lazy
358 358 loading; it'll probably move back to changelog for efficiency and
359 359 consistency reasons.
360 360
361 361 Note that the hiddenrevs will need invalidation when
362 362 - a new changeset is added (possibly unstable above extinct)
363 363 - a new obsolescence marker is added (possibly a new extinct changeset)
364 364
365 365 hidden changesets cannot have non-hidden descendants
366 366 """
367 367 hidden = set()
368 368 if self.obsstore:
369 369 ### hide extinct changesets that are not accessible by any means
370 370 hiddenquery = 'extinct() - ::(. + bookmark())'
371 371 hidden.update(self.revs(hiddenquery))
372 372 return hidden
373 373
374 374 @storecache('00changelog.i')
375 375 def changelog(self):
376 376 c = changelog.changelog(self.sopener)
377 377 if 'HG_PENDING' in os.environ:
378 378 p = os.environ['HG_PENDING']
379 379 if p.startswith(self.root):
380 380 c.readpending('00changelog.i.a')
381 381 return c
382 382
383 383 @storecache('00manifest.i')
384 384 def manifest(self):
385 385 return manifest.manifest(self.sopener)
386 386
387 387 @repofilecache('dirstate')
388 388 def dirstate(self):
389 389 warned = [0]
390 390 def validate(node):
391 391 try:
392 392 self.changelog.rev(node)
393 393 return node
394 394 except error.LookupError:
395 395 if not warned[0]:
396 396 warned[0] = True
397 397 self.ui.warn(_("warning: ignoring unknown"
398 398 " working parent %s!\n") % short(node))
399 399 return nullid
400 400
401 401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
402 402
403 403 def __getitem__(self, changeid):
404 404 if changeid is None:
405 405 return context.workingctx(self)
406 406 return context.changectx(self, changeid)
407 407
408 408 def __contains__(self, changeid):
409 409 try:
410 410 return bool(self.lookup(changeid))
411 411 except error.RepoLookupError:
412 412 return False
413 413
414 414 def __nonzero__(self):
415 415 return True
416 416
417 417 def __len__(self):
418 418 return len(self.changelog)
419 419
420 420 def __iter__(self):
421 421 return iter(self.changelog)
422 422
423 423 def revs(self, expr, *args):
424 424 '''Return a list of revisions matching the given revset'''
425 425 expr = revset.formatspec(expr, *args)
426 426 m = revset.match(None, expr)
427 427 return [r for r in m(self, list(self))]
428 428
429 429 def set(self, expr, *args):
430 430 '''
431 431 Yield a context for each matching revision, after doing arg
432 432 replacement via revset.formatspec
433 433 '''
434 434 for r in self.revs(expr, *args):
435 435 yield self[r]
436 436
437 437 def url(self):
438 438 return 'file:' + self.root
439 439
440 440 def hook(self, name, throw=False, **args):
441 441 return hook.hook(self.ui, self, name, throw, **args)
442 442
443 443 @unfilteredmethod
444 444 def _tag(self, names, node, message, local, user, date, extra={}):
445 445 if isinstance(names, str):
446 446 names = (names,)
447 447
448 448 branches = self.branchmap()
449 449 for name in names:
450 450 self.hook('pretag', throw=True, node=hex(node), tag=name,
451 451 local=local)
452 452 if name in branches:
453 453 self.ui.warn(_("warning: tag %s conflicts with existing"
454 454 " branch name\n") % name)
455 455
456 456 def writetags(fp, names, munge, prevtags):
457 457 fp.seek(0, 2)
458 458 if prevtags and prevtags[-1] != '\n':
459 459 fp.write('\n')
460 460 for name in names:
461 461 m = munge and munge(name) or name
462 462 if (self._tagscache.tagtypes and
463 463 name in self._tagscache.tagtypes):
464 464 old = self.tags().get(name, nullid)
465 465 fp.write('%s %s\n' % (hex(old), m))
466 466 fp.write('%s %s\n' % (hex(node), m))
467 467 fp.close()
468 468
469 469 prevtags = ''
470 470 if local:
471 471 try:
472 472 fp = self.opener('localtags', 'r+')
473 473 except IOError:
474 474 fp = self.opener('localtags', 'a')
475 475 else:
476 476 prevtags = fp.read()
477 477
478 478 # local tags are stored in the current charset
479 479 writetags(fp, names, None, prevtags)
480 480 for name in names:
481 481 self.hook('tag', node=hex(node), tag=name, local=local)
482 482 return
483 483
484 484 try:
485 485 fp = self.wfile('.hgtags', 'rb+')
486 486 except IOError, e:
487 487 if e.errno != errno.ENOENT:
488 488 raise
489 489 fp = self.wfile('.hgtags', 'ab')
490 490 else:
491 491 prevtags = fp.read()
492 492
493 493 # committed tags are stored in UTF-8
494 494 writetags(fp, names, encoding.fromlocal, prevtags)
495 495
496 496 fp.close()
497 497
498 498 self.invalidatecaches()
499 499
500 500 if '.hgtags' not in self.dirstate:
501 501 self[None].add(['.hgtags'])
502 502
503 503 m = matchmod.exact(self.root, '', ['.hgtags'])
504 504 tagnode = self.commit(message, user, date, extra=extra, match=m)
505 505
506 506 for name in names:
507 507 self.hook('tag', node=hex(node), tag=name, local=local)
508 508
509 509 return tagnode
510 510
511 511 def tag(self, names, node, message, local, user, date):
512 512 '''tag a revision with one or more symbolic names.
513 513
514 514 names is a list of strings or, when adding a single tag, names may be a
515 515 string.
516 516
517 517 if local is True, the tags are stored in a per-repository file.
518 518 otherwise, they are stored in the .hgtags file, and a new
519 519 changeset is committed with the change.
520 520
521 521 keyword arguments:
522 522
523 523 local: whether to store tags in non-version-controlled file
524 524 (default False)
525 525
526 526 message: commit message to use if committing
527 527
528 528 user: name of user to use if committing
529 529
530 530 date: date tuple to use if committing'''
531 531
532 532 if not local:
533 533 for x in self.status()[:5]:
534 534 if '.hgtags' in x:
535 535 raise util.Abort(_('working copy of .hgtags is changed '
536 536 '(please commit .hgtags manually)'))
537 537
538 538 self.tags() # instantiate the cache
539 539 self._tag(names, node, message, local, user, date)
540 540
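# Illustrative usage sketch (hypothetical node, user and message values):
#
#     repo.tag(['v1.0'], node, 'Added tag v1.0 for changeset %s' % short(node),
#              False, 'Example User <user@example.com>', None)
#
# With local=False this commits a new changeset touching .hgtags; with
# local=True the tag is only written to .hg/localtags.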
541 541 @filteredpropertycache
542 542 def _tagscache(self):
543 543 '''Returns a tagscache object that contains various tags related
544 544 caches.'''
545 545
546 546 # This simplifies its cache management by having one decorated
547 547 # function (this one) and the rest simply fetch things from it.
548 548 class tagscache(object):
549 549 def __init__(self):
550 550 # These two define the set of tags for this repository. tags
551 551 # maps tag name to node; tagtypes maps tag name to 'global' or
552 552 # 'local'. (Global tags are defined by .hgtags across all
553 553 # heads, and local tags are defined in .hg/localtags.)
554 554 # They constitute the in-memory cache of tags.
555 555 self.tags = self.tagtypes = None
556 556
557 557 self.nodetagscache = self.tagslist = None
558 558
559 559 cache = tagscache()
560 560 cache.tags, cache.tagtypes = self._findtags()
561 561
562 562 return cache
563 563
564 564 def tags(self):
565 565 '''return a mapping of tag to node'''
566 566 t = {}
567 567 if self.changelog.filteredrevs:
568 568 tags, tt = self._findtags()
569 569 else:
570 570 tags = self._tagscache.tags
571 571 for k, v in tags.iteritems():
572 572 try:
573 573 # ignore tags to unknown nodes
574 574 self.changelog.rev(v)
575 575 t[k] = v
576 576 except (error.LookupError, ValueError):
577 577 pass
578 578 return t
579 579
580 580 def _findtags(self):
581 581 '''Do the hard work of finding tags. Return a pair of dicts
582 582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
583 583 maps tag name to a string like \'global\' or \'local\'.
584 584 Subclasses or extensions are free to add their own tags, but
585 585 should be aware that the returned dicts will be retained for the
586 586 duration of the localrepo object.'''
587 587
588 588 # XXX what tagtype should subclasses/extensions use? Currently
589 589 # mq and bookmarks add tags, but do not set the tagtype at all.
590 590 # Should each extension invent its own tag type? Should there
591 591 # be one tagtype for all such "virtual" tags? Or is the status
592 592 # quo fine?
593 593
594 594 alltags = {} # map tag name to (node, hist)
595 595 tagtypes = {}
596 596
597 597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
598 598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
599 599
600 600 # Build the return dicts. Have to re-encode tag names because
601 601 # the tags module always uses UTF-8 (in order not to lose info
602 602 # writing to the cache), but the rest of Mercurial wants them in
603 603 # local encoding.
604 604 tags = {}
605 605 for (name, (node, hist)) in alltags.iteritems():
606 606 if node != nullid:
607 607 tags[encoding.tolocal(name)] = node
608 608 tags['tip'] = self.changelog.tip()
609 609 tagtypes = dict([(encoding.tolocal(name), value)
610 610 for (name, value) in tagtypes.iteritems()])
611 611 return (tags, tagtypes)
612 612
613 613 def tagtype(self, tagname):
614 614 '''
615 615 return the type of the given tag. result can be:
616 616
617 617 'local' : a local tag
618 618 'global' : a global tag
619 619 None : tag does not exist
620 620 '''
621 621
622 622 return self._tagscache.tagtypes.get(tagname)
623 623
624 624 def tagslist(self):
625 625 '''return a list of tags ordered by revision'''
626 626 if not self._tagscache.tagslist:
627 627 l = []
628 628 for t, n in self.tags().iteritems():
629 629 r = self.changelog.rev(n)
630 630 l.append((r, t, n))
631 631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
632 632
633 633 return self._tagscache.tagslist
634 634
635 635 def nodetags(self, node):
636 636 '''return the tags associated with a node'''
637 637 if not self._tagscache.nodetagscache:
638 638 nodetagscache = {}
639 639 for t, n in self._tagscache.tags.iteritems():
640 640 nodetagscache.setdefault(n, []).append(t)
641 641 for tags in nodetagscache.itervalues():
642 642 tags.sort()
643 643 self._tagscache.nodetagscache = nodetagscache
644 644 return self._tagscache.nodetagscache.get(node, [])
645 645
646 646 def nodebookmarks(self, node):
647 647 marks = []
648 648 for bookmark, n in self._bookmarks.iteritems():
649 649 if n == node:
650 650 marks.append(bookmark)
651 651 return sorted(marks)
652 652
653 653 def _cacheabletip(self):
654 654 """tip-most revision stable enought to used in persistent cache
655 655
656 656 This function is overwritten by MQ to ensure we do not write cache for
657 657 a part of the history that will likely change.
658 658
659 659 Efficient handling of filtered revisions in branchcache should offer a
660 660 better alternative. But we are using this approach until it is ready.
661 661 """
662 662 cl = self.changelog
663 663 return cl.rev(cl.tip())
664 664
665 665 @unfilteredmethod # Until we get a smarter cache management
666 666 def updatebranchcache(self):
667 667 cl = self.changelog
668 668 tip = cl.tip()
669 669 if self._branchcache is not None and self._branchcachetip == tip:
670 670 return
671 671
672 672 oldtip = self._branchcachetip
673 673 if oldtip is None or oldtip not in cl.nodemap:
674 674 partial, last, lrev = branchmap.read(self)
675 675 else:
676 676 lrev = cl.rev(oldtip)
677 677 partial = self._branchcache
678 678
679 679 catip = self._cacheabletip()
680 680 # if lrev == catip: cache is already up to date
681 681 # if lrev > catip: we have uncachable elements in `partial` and can't
682 682 # write them to disk
683 683 if lrev < catip:
684 684 ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
685 685 self._updatebranchcache(partial, ctxgen)
686 686 branchmap.write(self, partial, cl.node(catip), catip)
687 687 lrev = catip
688 688 # If the cacheable tip is lower than the actual tip, we need to update the
689 689 # cache up to tip. This update (from cacheable to actual tip) is not
690 690 # written to disk since it's not cacheable.
691 691 tiprev = len(self) - 1
692 692 if lrev < tiprev:
693 693 ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
694 694 self._updatebranchcache(partial, ctxgen)
695 695 self._branchcache = partial
696 696 self._branchcachetip = tip
697 697
698 698 def branchmap(self):
699 699 '''returns a dictionary {branch: [branchheads]}'''
700 700 if self.changelog.filteredrevs:
701 701 # some changesets are excluded; we can't use the cache
702 702 branchmap = {}
703 703 self._updatebranchcache(branchmap, (self[r] for r in self))
704 704 return branchmap
705 705 else:
706 706 self.updatebranchcache()
707 707 return self._branchcache
708 708
709 709
710 710 def _branchtip(self, heads):
711 711 '''return the tipmost branch head in heads'''
712 712 tip = heads[-1]
713 713 for h in reversed(heads):
714 714 if not self[h].closesbranch():
715 715 tip = h
716 716 break
717 717 return tip
718 718
719 719 def branchtip(self, branch):
720 720 '''return the tip node for a given branch'''
721 721 if branch not in self.branchmap():
722 722 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
723 723 return self._branchtip(self.branchmap()[branch])
724 724
725 725 def branchtags(self):
726 726 '''return a dict where branch names map to the tipmost head of
727 727 the branch, open heads come before closed'''
728 728 bt = {}
729 729 for bn, heads in self.branchmap().iteritems():
730 730 bt[bn] = self._branchtip(heads)
731 731 return bt
732 732
733 @unfilteredmethod # Until we get a smarter cache management
734 733 def _updatebranchcache(self, partial, ctxgen):
735 734 """Given a branchhead cache, partial, that may have extra nodes or be
736 735 missing heads, and a generator of nodes that are at least a superset of
737 736 the missing heads, this function updates partial to be correct.
738 737 """
739 738 # collect new branch entries
740 739 newbranches = {}
741 740 for c in ctxgen:
742 741 newbranches.setdefault(c.branch(), []).append(c.node())
743 742 # if older branchheads are reachable from new ones, they aren't
744 743 # really branchheads. Note checking parents is insufficient:
745 744 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
746 745 for branch, newnodes in newbranches.iteritems():
747 746 bheads = partial.setdefault(branch, [])
748 747 # Remove candidate heads that no longer are in the repo (e.g., as
749 748 # the result of a strip that just happened). Avoid using 'node in
750 749 # self' here because that dives down into branchcache code somewhat
751 750 # recursively.
752 751 bheadrevs = [self.changelog.rev(node) for node in bheads
753 752 if self.changelog.hasnode(node)]
754 753 newheadrevs = [self.changelog.rev(node) for node in newnodes
755 754 if self.changelog.hasnode(node)]
756 755 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
757 756 # Remove duplicates - nodes that are in newheadrevs and are already
758 757 # in bheadrevs. This can happen if you strip a node whose parent
759 758 # was already a head (because they're on different branches).
760 759 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
761 760
762 761 # Starting from tip means fewer passes over reachable. If we know
763 762 # the new candidates are not ancestors of existing heads, we don't
764 763 # have to examine ancestors of existing heads
765 764 if ctxisnew:
766 765 iterrevs = sorted(newheadrevs)
767 766 else:
768 767 iterrevs = list(bheadrevs)
769 768
770 769 # This loop prunes out two kinds of heads - heads that are
771 770 # superseded by a head in newheadrevs, and newheadrevs that are not
772 771 # heads because an existing head is their descendant.
773 772 while iterrevs:
774 773 latest = iterrevs.pop()
775 774 if latest not in bheadrevs:
776 775 continue
777 776 ancestors = set(self.changelog.ancestors([latest],
778 777 bheadrevs[0]))
779 778 if ancestors:
780 779 bheadrevs = [b for b in bheadrevs if b not in ancestors]
781 780 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
782 781
783 782 # There may be branches that cease to exist when the last commit in the
784 783 # branch was stripped. This code filters them out. Note that the
785 784 # branch that ceased to exist may not be in newbranches because
786 785 # newbranches is the set of candidate heads, which when you strip the
787 786 # last commit in a branch will be the parent branch.
788 787 for branch in partial.keys():
789 788 nodes = [head for head in partial[branch]
790 789 if self.changelog.hasnode(head)]
791 790 if not nodes:
792 791 del partial[branch]
793 792
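# Worked example of the pruning above, on a hypothetical history: if branch
# 'default' has cached heads at revisions {2, 5} and ctxgen introduces
# revision 6 whose parent is 5, bheadrevs becomes [2, 5, 6]; walking the
# ancestors of 6 removes 5, leaving [2, 6], which are mapped back to nodes
# and stored in partial['default'].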
794 793 def lookup(self, key):
795 794 return self[key].node()
796 795
797 796 def lookupbranch(self, key, remote=None):
798 797 repo = remote or self
799 798 if key in repo.branchmap():
800 799 return key
801 800
802 801 repo = (remote and remote.local()) and remote or self
803 802 return repo[key].branch()
804 803
805 804 def known(self, nodes):
806 805 nm = self.changelog.nodemap
807 806 pc = self._phasecache
808 807 result = []
809 808 for n in nodes:
810 809 r = nm.get(n)
811 810 resp = not (r is None or pc.phase(self, r) >= phases.secret)
812 811 result.append(resp)
813 812 return result
814 813
815 814 def local(self):
816 815 return self
817 816
818 817 def cancopy(self):
819 818 return self.local() # so statichttprepo's override of local() works
820 819
821 820 def join(self, f):
822 821 return os.path.join(self.path, f)
823 822
824 823 def wjoin(self, f):
825 824 return os.path.join(self.root, f)
826 825
827 826 def file(self, f):
828 827 if f[0] == '/':
829 828 f = f[1:]
830 829 return filelog.filelog(self.sopener, f)
831 830
832 831 def changectx(self, changeid):
833 832 return self[changeid]
834 833
835 834 def parents(self, changeid=None):
836 835 '''get list of changectxs for parents of changeid'''
837 836 return self[changeid].parents()
838 837
839 838 def setparents(self, p1, p2=nullid):
840 839 copies = self.dirstate.setparents(p1, p2)
841 840 if copies:
842 841 # Adjust copy records, the dirstate cannot do it, it
843 842 # requires access to parents manifests. Preserve them
844 843 # only for entries added to first parent.
845 844 pctx = self[p1]
846 845 for f in copies:
847 846 if f not in pctx and copies[f] in pctx:
848 847 self.dirstate.copy(copies[f], f)
849 848
850 849 def filectx(self, path, changeid=None, fileid=None):
851 850 """changeid can be a changeset revision, node, or tag.
852 851 fileid can be a file revision or node."""
853 852 return context.filectx(self, path, changeid, fileid)
854 853
855 854 def getcwd(self):
856 855 return self.dirstate.getcwd()
857 856
858 857 def pathto(self, f, cwd=None):
859 858 return self.dirstate.pathto(f, cwd)
860 859
861 860 def wfile(self, f, mode='r'):
862 861 return self.wopener(f, mode)
863 862
864 863 def _link(self, f):
865 864 return os.path.islink(self.wjoin(f))
866 865
867 866 def _loadfilter(self, filter):
868 867 if filter not in self.filterpats:
869 868 l = []
870 869 for pat, cmd in self.ui.configitems(filter):
871 870 if cmd == '!':
872 871 continue
873 872 mf = matchmod.match(self.root, '', [pat])
874 873 fn = None
875 874 params = cmd
876 875 for name, filterfn in self._datafilters.iteritems():
877 876 if cmd.startswith(name):
878 877 fn = filterfn
879 878 params = cmd[len(name):].lstrip()
880 879 break
881 880 if not fn:
882 881 fn = lambda s, c, **kwargs: util.filter(s, c)
883 882 # Wrap old filters not supporting keyword arguments
884 883 if not inspect.getargspec(fn)[2]:
885 884 oldfn = fn
886 885 fn = lambda s, c, **kwargs: oldfn(s, c)
887 886 l.append((mf, fn, params))
888 887 self.filterpats[filter] = l
889 888 return self.filterpats[filter]
890 889
891 890 def _filter(self, filterpats, filename, data):
892 891 for mf, fn, cmd in filterpats:
893 892 if mf(filename):
894 893 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
895 894 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
896 895 break
897 896
898 897 return data
899 898
900 899 @unfilteredpropertycache
901 900 def _encodefilterpats(self):
902 901 return self._loadfilter('encode')
903 902
904 903 @unfilteredpropertycache
905 904 def _decodefilterpats(self):
906 905 return self._loadfilter('decode')
907 906
908 907 def adddatafilter(self, name, filter):
909 908 self._datafilters[name] = filter
910 909
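# Illustrative sketch of a hypothetical data filter registration:
#
#     def upperencode(s, cmd, **kwargs):   # hypothetical filter function
#         return s.upper()
#     repo.adddatafilter('upper:', upperencode)
#
#     # and in hgrc, assuming such a filter is wanted for text files:
#     # [encode]
#     # **.txt = upper:
#
# _loadfilter() matches the configured command against registered filter
# names before falling back to running it as an external shell filter.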
911 910 def wread(self, filename):
912 911 if self._link(filename):
913 912 data = os.readlink(self.wjoin(filename))
914 913 else:
915 914 data = self.wopener.read(filename)
916 915 return self._filter(self._encodefilterpats, filename, data)
917 916
918 917 def wwrite(self, filename, data, flags):
919 918 data = self._filter(self._decodefilterpats, filename, data)
920 919 if 'l' in flags:
921 920 self.wopener.symlink(data, filename)
922 921 else:
923 922 self.wopener.write(filename, data)
924 923 if 'x' in flags:
925 924 util.setflags(self.wjoin(filename), False, True)
926 925
927 926 def wwritedata(self, filename, data):
928 927 return self._filter(self._decodefilterpats, filename, data)
929 928
930 929 def transaction(self, desc):
931 930 tr = self._transref and self._transref() or None
932 931 if tr and tr.running():
933 932 return tr.nest()
934 933
935 934 # abort here if the journal already exists
936 935 if os.path.exists(self.sjoin("journal")):
937 936 raise error.RepoError(
938 937 _("abandoned transaction found - run hg recover"))
939 938
940 939 self._writejournal(desc)
941 940 renames = [(x, undoname(x)) for x in self._journalfiles()]
942 941
943 942 tr = transaction.transaction(self.ui.warn, self.sopener,
944 943 self.sjoin("journal"),
945 944 aftertrans(renames),
946 945 self.store.createmode)
947 946 self._transref = weakref.ref(tr)
948 947 return tr
949 948
950 949 def _journalfiles(self):
951 950 return (self.sjoin('journal'), self.join('journal.dirstate'),
952 951 self.join('journal.branch'), self.join('journal.desc'),
953 952 self.join('journal.bookmarks'),
954 953 self.sjoin('journal.phaseroots'))
955 954
956 955 def undofiles(self):
957 956 return [undoname(x) for x in self._journalfiles()]
958 957
959 958 def _writejournal(self, desc):
960 959 self.opener.write("journal.dirstate",
961 960 self.opener.tryread("dirstate"))
962 961 self.opener.write("journal.branch",
963 962 encoding.fromlocal(self.dirstate.branch()))
964 963 self.opener.write("journal.desc",
965 964 "%d\n%s\n" % (len(self), desc))
966 965 self.opener.write("journal.bookmarks",
967 966 self.opener.tryread("bookmarks"))
968 967 self.sopener.write("journal.phaseroots",
969 968 self.sopener.tryread("phaseroots"))
970 969
971 970 def recover(self):
972 971 lock = self.lock()
973 972 try:
974 973 if os.path.exists(self.sjoin("journal")):
975 974 self.ui.status(_("rolling back interrupted transaction\n"))
976 975 transaction.rollback(self.sopener, self.sjoin("journal"),
977 976 self.ui.warn)
978 977 self.invalidate()
979 978 return True
980 979 else:
981 980 self.ui.warn(_("no interrupted transaction available\n"))
982 981 return False
983 982 finally:
984 983 lock.release()
985 984
986 985 def rollback(self, dryrun=False, force=False):
987 986 wlock = lock = None
988 987 try:
989 988 wlock = self.wlock()
990 989 lock = self.lock()
991 990 if os.path.exists(self.sjoin("undo")):
992 991 return self._rollback(dryrun, force)
993 992 else:
994 993 self.ui.warn(_("no rollback information available\n"))
995 994 return 1
996 995 finally:
997 996 release(lock, wlock)
998 997
999 998 @unfilteredmethod # Until we get smarter cache management
1000 999 def _rollback(self, dryrun, force):
1001 1000 ui = self.ui
1002 1001 try:
1003 1002 args = self.opener.read('undo.desc').splitlines()
1004 1003 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1005 1004 if len(args) >= 3:
1006 1005 detail = args[2]
1007 1006 oldtip = oldlen - 1
1008 1007
1009 1008 if detail and ui.verbose:
1010 1009 msg = (_('repository tip rolled back to revision %s'
1011 1010 ' (undo %s: %s)\n')
1012 1011 % (oldtip, desc, detail))
1013 1012 else:
1014 1013 msg = (_('repository tip rolled back to revision %s'
1015 1014 ' (undo %s)\n')
1016 1015 % (oldtip, desc))
1017 1016 except IOError:
1018 1017 msg = _('rolling back unknown transaction\n')
1019 1018 desc = None
1020 1019
1021 1020 if not force and self['.'] != self['tip'] and desc == 'commit':
1022 1021 raise util.Abort(
1023 1022 _('rollback of last commit while not checked out '
1024 1023 'may lose data'), hint=_('use -f to force'))
1025 1024
1026 1025 ui.status(msg)
1027 1026 if dryrun:
1028 1027 return 0
1029 1028
1030 1029 parents = self.dirstate.parents()
1031 1030 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1032 1031 if os.path.exists(self.join('undo.bookmarks')):
1033 1032 util.rename(self.join('undo.bookmarks'),
1034 1033 self.join('bookmarks'))
1035 1034 if os.path.exists(self.sjoin('undo.phaseroots')):
1036 1035 util.rename(self.sjoin('undo.phaseroots'),
1037 1036 self.sjoin('phaseroots'))
1038 1037 self.invalidate()
1039 1038
1040 1039 # Discard all cache entries to force reloading everything.
1041 1040 self._filecache.clear()
1042 1041
1043 1042 parentgone = (parents[0] not in self.changelog.nodemap or
1044 1043 parents[1] not in self.changelog.nodemap)
1045 1044 if parentgone:
1046 1045 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1047 1046 try:
1048 1047 branch = self.opener.read('undo.branch')
1049 1048 self.dirstate.setbranch(encoding.tolocal(branch))
1050 1049 except IOError:
1051 1050 ui.warn(_('named branch could not be reset: '
1052 1051 'current branch is still \'%s\'\n')
1053 1052 % self.dirstate.branch())
1054 1053
1055 1054 self.dirstate.invalidate()
1056 1055 parents = tuple([p.rev() for p in self.parents()])
1057 1056 if len(parents) > 1:
1058 1057 ui.status(_('working directory now based on '
1059 1058 'revisions %d and %d\n') % parents)
1060 1059 else:
1061 1060 ui.status(_('working directory now based on '
1062 1061 'revision %d\n') % parents)
1063 1062 # TODO: if we know which new heads may result from this rollback, pass
1064 1063 # them to destroy(), which will prevent the branchhead cache from being
1065 1064 # invalidated.
1066 1065 self.destroyed()
1067 1066 return 0
1068 1067
1069 1068 def invalidatecaches(self):
1070 1069
1071 1070 if '_tagscache' in vars(self):
1072 1071 # can't use delattr on proxy
1073 1072 del self.__dict__['_tagscache']
1074 1073
1075 1074 self.unfiltered()._branchcache = None # in UTF-8
1076 1075 self.unfiltered()._branchcachetip = None
1077 1076 self.invalidatevolatilesets()
1078 1077
1079 1078 def invalidatevolatilesets(self):
1080 1079 self.filteredrevcache.clear()
1081 1080 obsolete.clearobscaches(self)
1082 1081 if 'hiddenrevs' in vars(self):
1083 1082 del self.hiddenrevs
1084 1083
1085 1084 def invalidatedirstate(self):
1086 1085 '''Invalidates the dirstate, causing the next call to dirstate
1087 1086 to check if it was modified since the last time it was read,
1088 1087 rereading it if it has.
1089 1088
1090 1089 This is different from dirstate.invalidate() in that it doesn't always
1091 1090 reread the dirstate. Use dirstate.invalidate() if you want to
1092 1091 explicitly read the dirstate again (i.e. restoring it to a previous
1093 1092 known good state).'''
1094 1093 if hasunfilteredcache(self, 'dirstate'):
1095 1094 for k in self.dirstate._filecache:
1096 1095 try:
1097 1096 delattr(self.dirstate, k)
1098 1097 except AttributeError:
1099 1098 pass
1100 1099 delattr(self.unfiltered(), 'dirstate')
1101 1100
1102 1101 def invalidate(self):
1103 1102 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1104 1103 for k in self._filecache:
1105 1104 # dirstate is invalidated separately in invalidatedirstate()
1106 1105 if k == 'dirstate':
1107 1106 continue
1108 1107
1109 1108 try:
1110 1109 delattr(unfiltered, k)
1111 1110 except AttributeError:
1112 1111 pass
1113 1112 self.invalidatecaches()
1114 1113
1115 1114 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1116 1115 try:
1117 1116 l = lock.lock(lockname, 0, releasefn, desc=desc)
1118 1117 except error.LockHeld, inst:
1119 1118 if not wait:
1120 1119 raise
1121 1120 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1122 1121 (desc, inst.locker))
1123 1122 # default to 600 seconds timeout
1124 1123 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1125 1124 releasefn, desc=desc)
1126 1125 if acquirefn:
1127 1126 acquirefn()
1128 1127 return l
1129 1128
1130 1129 def _afterlock(self, callback):
1131 1130 """add a callback to the current repository lock.
1132 1131
1133 1132 The callback will be executed on lock release."""
1134 1133 l = self._lockref and self._lockref()
1135 1134 if l:
1136 1135 l.postrelease.append(callback)
1137 1136 else:
1138 1137 callback()
1139 1138
1140 1139 def lock(self, wait=True):
1141 1140 '''Lock the repository store (.hg/store) and return a weak reference
1142 1141 to the lock. Use this before modifying the store (e.g. committing or
1143 1142 stripping). If you are opening a transaction, get a lock as well.'''
1144 1143 l = self._lockref and self._lockref()
1145 1144 if l is not None and l.held:
1146 1145 l.lock()
1147 1146 return l
1148 1147
1149 1148 def unlock():
1150 1149 self.store.write()
1151 1150 if hasunfilteredcache(self, '_phasecache'):
1152 1151 self._phasecache.write()
1153 1152 for k, ce in self._filecache.items():
1154 1153 if k == 'dirstate':
1155 1154 continue
1156 1155 ce.refresh()
1157 1156
1158 1157 l = self._lock(self.sjoin("lock"), wait, unlock,
1159 1158 self.invalidate, _('repository %s') % self.origroot)
1160 1159 self._lockref = weakref.ref(l)
1161 1160 return l
1162 1161
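# Illustrative sketch of the lock/transaction pattern described above:
#
#     lock = repo.lock()                  # protects .hg/store
#     try:
#         tr = repo.transaction('example-change')
#         try:
#             # ... write to the store ...
#             tr.close()
#         finally:
#             tr.release()
#     finally:
#         lock.release()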
1163 1162 def wlock(self, wait=True):
1164 1163 '''Lock the non-store parts of the repository (everything under
1165 1164 .hg except .hg/store) and return a weak reference to the lock.
1166 1165 Use this before modifying files in .hg.'''
1167 1166 l = self._wlockref and self._wlockref()
1168 1167 if l is not None and l.held:
1169 1168 l.lock()
1170 1169 return l
1171 1170
1172 1171 def unlock():
1173 1172 self.dirstate.write()
1174 1173 ce = self._filecache.get('dirstate')
1175 1174 if ce:
1176 1175 ce.refresh()
1177 1176
1178 1177 l = self._lock(self.join("wlock"), wait, unlock,
1179 1178 self.invalidatedirstate, _('working directory of %s') %
1180 1179 self.origroot)
1181 1180 self._wlockref = weakref.ref(l)
1182 1181 return l
1183 1182
1184 1183 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1185 1184 """
1186 1185 commit an individual file as part of a larger transaction
1187 1186 """
1188 1187
1189 1188 fname = fctx.path()
1190 1189 text = fctx.data()
1191 1190 flog = self.file(fname)
1192 1191 fparent1 = manifest1.get(fname, nullid)
1193 1192 fparent2 = fparent2o = manifest2.get(fname, nullid)
1194 1193
1195 1194 meta = {}
1196 1195 copy = fctx.renamed()
1197 1196 if copy and copy[0] != fname:
1198 1197 # Mark the new revision of this file as a copy of another
1199 1198 # file. This copy data will effectively act as a parent
1200 1199 # of this new revision. If this is a merge, the first
1201 1200 # parent will be the nullid (meaning "look up the copy data")
1202 1201 # and the second one will be the other parent. For example:
1203 1202 #
1204 1203 # 0 --- 1 --- 3 rev1 changes file foo
1205 1204 # \ / rev2 renames foo to bar and changes it
1206 1205 # \- 2 -/ rev3 should have bar with all changes and
1207 1206 # should record that bar descends from
1208 1207 # bar in rev2 and foo in rev1
1209 1208 #
1210 1209 # this allows this merge to succeed:
1211 1210 #
1212 1211 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1213 1212 # \ / merging rev3 and rev4 should use bar@rev2
1214 1213 # \- 2 --- 4 as the merge base
1215 1214 #
1216 1215
1217 1216 cfname = copy[0]
1218 1217 crev = manifest1.get(cfname)
1219 1218 newfparent = fparent2
1220 1219
1221 1220 if manifest2: # branch merge
1222 1221 if fparent2 == nullid or crev is None: # copied on remote side
1223 1222 if cfname in manifest2:
1224 1223 crev = manifest2[cfname]
1225 1224 newfparent = fparent1
1226 1225
1227 1226 # find source in nearest ancestor if we've lost track
1228 1227 if not crev:
1229 1228 self.ui.debug(" %s: searching for copy revision for %s\n" %
1230 1229 (fname, cfname))
1231 1230 for ancestor in self[None].ancestors():
1232 1231 if cfname in ancestor:
1233 1232 crev = ancestor[cfname].filenode()
1234 1233 break
1235 1234
1236 1235 if crev:
1237 1236 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1238 1237 meta["copy"] = cfname
1239 1238 meta["copyrev"] = hex(crev)
1240 1239 fparent1, fparent2 = nullid, newfparent
1241 1240 else:
1242 1241 self.ui.warn(_("warning: can't find ancestor for '%s' "
1243 1242 "copied from '%s'!\n") % (fname, cfname))
1244 1243
1245 1244 elif fparent2 != nullid:
1246 1245 # is one parent an ancestor of the other?
1247 1246 fparentancestor = flog.ancestor(fparent1, fparent2)
1248 1247 if fparentancestor == fparent1:
1249 1248 fparent1, fparent2 = fparent2, nullid
1250 1249 elif fparentancestor == fparent2:
1251 1250 fparent2 = nullid
1252 1251
1253 1252 # is the file changed?
1254 1253 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1255 1254 changelist.append(fname)
1256 1255 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1257 1256
1258 1257 # are just the flags changed during merge?
1259 1258 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1260 1259 changelist.append(fname)
1261 1260
1262 1261 return fparent1
1263 1262
1264 1263 @unfilteredmethod
1265 1264 def commit(self, text="", user=None, date=None, match=None, force=False,
1266 1265 editor=False, extra={}):
1267 1266 """Add a new revision to current repository.
1268 1267
1269 1268 Revision information is gathered from the working directory,
1270 1269 match can be used to filter the committed files. If editor is
1271 1270 supplied, it is called to get a commit message.
1272 1271 """
1273 1272
1274 1273 def fail(f, msg):
1275 1274 raise util.Abort('%s: %s' % (f, msg))
1276 1275
1277 1276 if not match:
1278 1277 match = matchmod.always(self.root, '')
1279 1278
1280 1279 if not force:
1281 1280 vdirs = []
1282 1281 match.dir = vdirs.append
1283 1282 match.bad = fail
1284 1283
1285 1284 wlock = self.wlock()
1286 1285 try:
1287 1286 wctx = self[None]
1288 1287 merge = len(wctx.parents()) > 1
1289 1288
1290 1289 if (not force and merge and match and
1291 1290 (match.files() or match.anypats())):
1292 1291 raise util.Abort(_('cannot partially commit a merge '
1293 1292 '(do not specify files or patterns)'))
1294 1293
1295 1294 changes = self.status(match=match, clean=force)
1296 1295 if force:
1297 1296 changes[0].extend(changes[6]) # mq may commit unchanged files
1298 1297
1299 1298 # check subrepos
1300 1299 subs = []
1301 1300 commitsubs = set()
1302 1301 newstate = wctx.substate.copy()
1303 1302 # only manage subrepos and .hgsubstate if .hgsub is present
1304 1303 if '.hgsub' in wctx:
1305 1304 # we'll decide whether to track this ourselves, thanks
1306 1305 if '.hgsubstate' in changes[0]:
1307 1306 changes[0].remove('.hgsubstate')
1308 1307 if '.hgsubstate' in changes[2]:
1309 1308 changes[2].remove('.hgsubstate')
1310 1309
1311 1310 # compare current state to last committed state
1312 1311 # build new substate based on last committed state
1313 1312 oldstate = wctx.p1().substate
1314 1313 for s in sorted(newstate.keys()):
1315 1314 if not match(s):
1316 1315 # ignore working copy, use old state if present
1317 1316 if s in oldstate:
1318 1317 newstate[s] = oldstate[s]
1319 1318 continue
1320 1319 if not force:
1321 1320 raise util.Abort(
1322 1321 _("commit with new subrepo %s excluded") % s)
1323 1322 if wctx.sub(s).dirty(True):
1324 1323 if not self.ui.configbool('ui', 'commitsubrepos'):
1325 1324 raise util.Abort(
1326 1325 _("uncommitted changes in subrepo %s") % s,
1327 1326 hint=_("use --subrepos for recursive commit"))
1328 1327 subs.append(s)
1329 1328 commitsubs.add(s)
1330 1329 else:
1331 1330 bs = wctx.sub(s).basestate()
1332 1331 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1333 1332 if oldstate.get(s, (None, None, None))[1] != bs:
1334 1333 subs.append(s)
1335 1334
1336 1335 # check for removed subrepos
1337 1336 for p in wctx.parents():
1338 1337 r = [s for s in p.substate if s not in newstate]
1339 1338 subs += [s for s in r if match(s)]
1340 1339 if subs:
1341 1340 if (not match('.hgsub') and
1342 1341 '.hgsub' in (wctx.modified() + wctx.added())):
1343 1342 raise util.Abort(
1344 1343 _("can't commit subrepos without .hgsub"))
1345 1344 changes[0].insert(0, '.hgsubstate')
1346 1345
1347 1346 elif '.hgsub' in changes[2]:
1348 1347 # clean up .hgsubstate when .hgsub is removed
1349 1348 if ('.hgsubstate' in wctx and
1350 1349 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1351 1350 changes[2].insert(0, '.hgsubstate')
1352 1351
1353 1352 # make sure all explicit patterns are matched
1354 1353 if not force and match.files():
1355 1354 matched = set(changes[0] + changes[1] + changes[2])
1356 1355
1357 1356 for f in match.files():
1358 1357 f = self.dirstate.normalize(f)
1359 1358 if f == '.' or f in matched or f in wctx.substate:
1360 1359 continue
1361 1360 if f in changes[3]: # missing
1362 1361 fail(f, _('file not found!'))
1363 1362 if f in vdirs: # visited directory
1364 1363 d = f + '/'
1365 1364 for mf in matched:
1366 1365 if mf.startswith(d):
1367 1366 break
1368 1367 else:
1369 1368 fail(f, _("no match under directory!"))
1370 1369 elif f not in self.dirstate:
1371 1370 fail(f, _("file not tracked!"))
1372 1371
1373 1372 if (not force and not extra.get("close") and not merge
1374 1373 and not (changes[0] or changes[1] or changes[2])
1375 1374 and wctx.branch() == wctx.p1().branch()):
1376 1375 return None
1377 1376
1378 1377 if merge and changes[3]:
1379 1378 raise util.Abort(_("cannot commit merge with missing files"))
1380 1379
1381 1380 ms = mergemod.mergestate(self)
1382 1381 for f in changes[0]:
1383 1382 if f in ms and ms[f] == 'u':
1384 1383 raise util.Abort(_("unresolved merge conflicts "
1385 1384 "(see hg help resolve)"))
1386 1385
1387 1386 cctx = context.workingctx(self, text, user, date, extra, changes)
1388 1387 if editor:
1389 1388 cctx._text = editor(self, cctx, subs)
1390 1389 edited = (text != cctx._text)
1391 1390
1392 1391 # commit subs and write new state
1393 1392 if subs:
1394 1393 for s in sorted(commitsubs):
1395 1394 sub = wctx.sub(s)
1396 1395 self.ui.status(_('committing subrepository %s\n') %
1397 1396 subrepo.subrelpath(sub))
1398 1397 sr = sub.commit(cctx._text, user, date)
1399 1398 newstate[s] = (newstate[s][0], sr)
1400 1399 subrepo.writestate(self, newstate)
1401 1400
1402 1401 # Save commit message in case this transaction gets rolled back
1403 1402 # (e.g. by a pretxncommit hook). Leave the content alone on
1404 1403 # the assumption that the user will use the same editor again.
1405 1404 msgfn = self.savecommitmessage(cctx._text)
1406 1405
1407 1406 p1, p2 = self.dirstate.parents()
1408 1407 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1409 1408 try:
1410 1409 self.hook("precommit", throw=True, parent1=hookp1,
1411 1410 parent2=hookp2)
1412 1411 ret = self.commitctx(cctx, True)
1413 1412 except: # re-raises
1414 1413 if edited:
1415 1414 self.ui.write(
1416 1415 _('note: commit message saved in %s\n') % msgfn)
1417 1416 raise
1418 1417
1419 1418 # update bookmarks, dirstate and mergestate
1420 1419 bookmarks.update(self, [p1, p2], ret)
1421 1420 for f in changes[0] + changes[1]:
1422 1421 self.dirstate.normal(f)
1423 1422 for f in changes[2]:
1424 1423 self.dirstate.drop(f)
1425 1424 self.dirstate.setparents(ret)
1426 1425 ms.reset()
1427 1426 finally:
1428 1427 wlock.release()
1429 1428
1430 1429 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1431 1430 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1432 1431 self._afterlock(commithook)
1433 1432 return ret
1434 1433
1435 1434 @unfilteredmethod
1436 1435 def commitctx(self, ctx, error=False):
1437 1436 """Add a new revision to current repository.
1438 1437 Revision information is passed via the context argument.
1439 1438 """
1440 1439
1441 1440 tr = lock = None
1442 1441 removed = list(ctx.removed())
1443 1442 p1, p2 = ctx.p1(), ctx.p2()
1444 1443 user = ctx.user()
1445 1444
1446 1445 lock = self.lock()
1447 1446 try:
1448 1447 tr = self.transaction("commit")
1449 1448 trp = weakref.proxy(tr)
1450 1449
1451 1450 if ctx.files():
1452 1451 m1 = p1.manifest().copy()
1453 1452 m2 = p2.manifest()
1454 1453
1455 1454 # check in files
1456 1455 new = {}
1457 1456 changed = []
1458 1457 linkrev = len(self)
1459 1458 for f in sorted(ctx.modified() + ctx.added()):
1460 1459 self.ui.note(f + "\n")
1461 1460 try:
1462 1461 fctx = ctx[f]
1463 1462 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1464 1463 changed)
1465 1464 m1.set(f, fctx.flags())
1466 1465 except OSError, inst:
1467 1466 self.ui.warn(_("trouble committing %s!\n") % f)
1468 1467 raise
1469 1468 except IOError, inst:
1470 1469 errcode = getattr(inst, 'errno', errno.ENOENT)
1471 1470 if error or errcode and errcode != errno.ENOENT:
1472 1471 self.ui.warn(_("trouble committing %s!\n") % f)
1473 1472 raise
1474 1473 else:
1475 1474 removed.append(f)
1476 1475
1477 1476 # update manifest
1478 1477 m1.update(new)
1479 1478 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1480 1479 drop = [f for f in removed if f in m1]
1481 1480 for f in drop:
1482 1481 del m1[f]
1483 1482 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1484 1483 p2.manifestnode(), (new, drop))
1485 1484 files = changed + removed
1486 1485 else:
1487 1486 mn = p1.manifestnode()
1488 1487 files = []
1489 1488
1490 1489 # update changelog
1491 1490 self.changelog.delayupdate()
1492 1491 n = self.changelog.add(mn, files, ctx.description(),
1493 1492 trp, p1.node(), p2.node(),
1494 1493 user, ctx.date(), ctx.extra().copy())
1495 1494 p = lambda: self.changelog.writepending() and self.root or ""
1496 1495 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1497 1496 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1498 1497 parent2=xp2, pending=p)
1499 1498 self.changelog.finalize(trp)
1500 1499 # set the new commit in its proper phase
1501 1500 targetphase = phases.newcommitphase(self.ui)
1502 1501 if targetphase:
1503 1502 # retracting the boundary does not alter parent changesets.
1504 1503 # if a parent has a higher phase, the resulting phase will
1505 1504 # be compliant anyway
1506 1505 #
1507 1506 # if minimal phase was 0 we don't need to retract anything
1508 1507 phases.retractboundary(self, targetphase, [n])
1509 1508 tr.close()
1510 1509 self.updatebranchcache()
1511 1510 return n
1512 1511 finally:
1513 1512 if tr:
1514 1513 tr.release()
1515 1514 lock.release()
1516 1515
1517 1516 @unfilteredmethod
1518 1517 def destroyed(self, newheadnodes=None):
1519 1518 '''Inform the repository that nodes have been destroyed.
1520 1519 Intended for use by strip and rollback, so there's a common
1521 1520 place for anything that has to be done after destroying history.
1522 1521
1523 1522 If you know the branchhead cache was up to date before nodes were removed
1524 1523 and you also know the set of candidate new heads that may have resulted
1525 1524 from the destruction, you can set newheadnodes. This will enable the
1526 1525 code to update the branchheads cache, rather than having future code
1527 1526 decide it's invalid and regenerating it from scratch.
1528 1527 '''
1529 1528 # If we have info, newheadnodes, on how to update the branch cache, do
1530 1529 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1531 1530 # will be caught the next time it is read.
1532 1531 if newheadnodes:
1533 1532 tiprev = len(self) - 1
1534 1533 ctxgen = (self[node] for node in newheadnodes
1535 1534 if self.changelog.hasnode(node))
1536 1535 self._updatebranchcache(self._branchcache, ctxgen)
1537 1536 branchmap.write(self, self._branchcache, self.changelog.tip(),
1538 1537 tiprev)
1539 1538
1540 1539 # Ensure the persistent tag cache is updated. Doing it now
1541 1540 # means that the tag cache only has to worry about destroyed
1542 1541 # heads immediately after a strip/rollback. That in turn
1543 1542 # guarantees that "cachetip == currenttip" (comparing both rev
1544 1543 # and node) always means no nodes have been added or destroyed.
1545 1544
1546 1545 # XXX this is suboptimal when qrefresh'ing: we strip the current
1547 1546 # head, refresh the tag cache, then immediately add a new head.
1548 1547 # But I think doing it this way is necessary for the "instant
1549 1548 # tag cache retrieval" case to work.
1550 1549 self.invalidatecaches()
1551 1550
1552 1551 # Discard all cache entries to force reloading everything.
1553 1552 self._filecache.clear()
1554 1553
1555 1554 def walk(self, match, node=None):
1556 1555 '''
1557 1556 walk recursively through the directory tree or a given
1558 1557 changeset, finding all files matched by the match
1559 1558 function
1560 1559 '''
1561 1560 return self[node].walk(match)
1562 1561
1563 1562 def status(self, node1='.', node2=None, match=None,
1564 1563 ignored=False, clean=False, unknown=False,
1565 1564 listsubrepos=False):
1566 1565 """return status of files between two nodes or node and working
1567 1566 directory.
1568 1567
1569 1568 If node1 is None, use the first dirstate parent instead.
1570 1569 If node2 is None, compare node1 with working directory.
1571 1570 """
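        # The value built and returned below is a 7-tuple of sorted lists:
        # (modified, added, removed, deleted, unknown, ignored, clean).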
1572 1571
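        # small helper: return a copy of ctx's manifest restricted to the
        # files selected by 'match'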
1573 1572 def mfmatches(ctx):
1574 1573 mf = ctx.manifest().copy()
1575 1574 if match.always():
1576 1575 return mf
1577 1576 for fn in mf.keys():
1578 1577 if not match(fn):
1579 1578 del mf[fn]
1580 1579 return mf
1581 1580
1582 1581 if isinstance(node1, context.changectx):
1583 1582 ctx1 = node1
1584 1583 else:
1585 1584 ctx1 = self[node1]
1586 1585 if isinstance(node2, context.changectx):
1587 1586 ctx2 = node2
1588 1587 else:
1589 1588 ctx2 = self[node2]
1590 1589
1591 1590 working = ctx2.rev() is None
1592 1591 parentworking = working and ctx1 == self['.']
1593 1592 match = match or matchmod.always(self.root, self.getcwd())
1594 1593 listignored, listclean, listunknown = ignored, clean, unknown
1595 1594
1596 1595 # load earliest manifest first for caching reasons
1597 1596 if not working and ctx2.rev() < ctx1.rev():
1598 1597 ctx2.manifest()
1599 1598
1600 1599 if not parentworking:
1601 1600 def bad(f, msg):
1602 1601 # 'f' may be a directory pattern from 'match.files()',
1603 1602 # so 'f not in ctx1' is not enough
1604 1603 if f not in ctx1 and f not in ctx1.dirs():
1605 1604 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1606 1605 match.bad = bad
1607 1606
1608 1607 if working: # we need to scan the working dir
1609 1608 subrepos = []
1610 1609 if '.hgsub' in self.dirstate:
1611 1610 subrepos = ctx2.substate.keys()
1612 1611 s = self.dirstate.status(match, subrepos, listignored,
1613 1612 listclean, listunknown)
1614 1613 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1615 1614
1616 1615 # check for any possibly clean files
1617 1616 if parentworking and cmp:
1618 1617 fixup = []
1619 1618 # do a full compare of any files that might have changed
1620 1619 for f in sorted(cmp):
1621 1620 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1622 1621 or ctx1[f].cmp(ctx2[f])):
1623 1622 modified.append(f)
1624 1623 else:
1625 1624 fixup.append(f)
1626 1625
1627 1626 # update dirstate for files that are actually clean
1628 1627 if fixup:
1629 1628 if listclean:
1630 1629 clean += fixup
1631 1630
1632 1631 try:
1633 1632 # updating the dirstate is optional
1634 1633 # so we don't wait on the lock
1635 1634 wlock = self.wlock(False)
1636 1635 try:
1637 1636 for f in fixup:
1638 1637 self.dirstate.normal(f)
1639 1638 finally:
1640 1639 wlock.release()
1641 1640 except error.LockError:
1642 1641 pass
1643 1642
1644 1643 if not parentworking:
1645 1644 mf1 = mfmatches(ctx1)
1646 1645 if working:
1647 1646 # we are comparing working dir against non-parent
1648 1647 # generate a pseudo-manifest for the working dir
1649 1648 mf2 = mfmatches(self['.'])
1650 1649 for f in cmp + modified + added:
1651 1650 mf2[f] = None
1652 1651 mf2.set(f, ctx2.flags(f))
1653 1652 for f in removed:
1654 1653 if f in mf2:
1655 1654 del mf2[f]
1656 1655 else:
1657 1656 # we are comparing two revisions
1658 1657 deleted, unknown, ignored = [], [], []
1659 1658 mf2 = mfmatches(ctx2)
1660 1659
1661 1660 modified, added, clean = [], [], []
1662 1661 withflags = mf1.withflags() | mf2.withflags()
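            # a file is reported modified when its exec/link flags differ, or
            # when its manifest nodes differ and either mf2 records a real
            # node or a content comparison confirms the change (entries added
            # to the working-directory pseudo-manifest above are None)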
1663 1662 for fn in mf2:
1664 1663 if fn in mf1:
1665 1664 if (fn not in deleted and
1666 1665 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1667 1666 (mf1[fn] != mf2[fn] and
1668 1667 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1669 1668 modified.append(fn)
1670 1669 elif listclean:
1671 1670 clean.append(fn)
1672 1671 del mf1[fn]
1673 1672 elif fn not in deleted:
1674 1673 added.append(fn)
1675 1674 removed = mf1.keys()
1676 1675
1677 1676 if working and modified and not self.dirstate._checklink:
1678 1677 # Symlink placeholders may get non-symlink-like contents
1679 1678 # via user error or dereferencing by NFS or Samba servers,
1680 1679 # so we filter out any placeholders that don't look like a
1681 1680 # symlink
1682 1681 sane = []
1683 1682 for f in modified:
1684 1683 if ctx2.flags(f) == 'l':
1685 1684 d = ctx2[f].data()
1686 1685 if len(d) >= 1024 or '\n' in d or util.binary(d):
1687 1686 self.ui.debug('ignoring suspect symlink placeholder'
1688 1687 ' "%s"\n' % f)
1689 1688 continue
1690 1689 sane.append(f)
1691 1690 modified = sane
1692 1691
1693 1692 r = modified, added, removed, deleted, unknown, ignored, clean
1694 1693
1695 1694 if listsubrepos:
1696 1695 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1697 1696 if working:
1698 1697 rev2 = None
1699 1698 else:
1700 1699 rev2 = ctx2.substate[subpath][1]
1701 1700 try:
1702 1701 submatch = matchmod.narrowmatcher(subpath, match)
1703 1702 s = sub.status(rev2, match=submatch, ignored=listignored,
1704 1703 clean=listclean, unknown=listunknown,
1705 1704 listsubrepos=True)
1706 1705 for rfiles, sfiles in zip(r, s):
1707 1706 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1708 1707 except error.LookupError:
1709 1708 self.ui.status(_("skipping missing subrepository: %s\n")
1710 1709 % subpath)
1711 1710
1712 1711 for l in r:
1713 1712 l.sort()
1714 1713 return r
1715 1714
1716 1715 def heads(self, start=None):
1717 1716 heads = self.changelog.heads(start)
1718 1717 # sort the output in rev descending order
1719 1718 return sorted(heads, key=self.changelog.rev, reverse=True)
1720 1719
1721 1720 def branchheads(self, branch=None, start=None, closed=False):
1722 1721 '''return a (possibly filtered) list of heads for the given branch
1723 1722
1724 1723 Heads are returned in topological order, from newest to oldest.
1725 1724 If branch is None, use the dirstate branch.
1726 1725 If start is not None, return only heads reachable from start.
1727 1726 If closed is True, return heads that are marked as closed as well.
1728 1727 '''
1729 1728 if branch is None:
1730 1729 branch = self[None].branch()
1731 1730 branches = self.branchmap()
1732 1731 if branch not in branches:
1733 1732 return []
1734 1733 # the cache returns heads ordered lowest to highest
1735 1734 bheads = list(reversed(branches[branch]))
1736 1735 if start is not None:
1737 1736 # filter out the heads that cannot be reached from startrev
1738 1737 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1739 1738 bheads = [h for h in bheads if h in fbheads]
1740 1739 if not closed:
1741 1740 bheads = [h for h in bheads if not self[h].closesbranch()]
1742 1741 return bheads
1743 1742
1744 1743 def branches(self, nodes):
1745 1744 if not nodes:
1746 1745 nodes = [self.changelog.tip()]
1747 1746 b = []
1748 1747 for n in nodes:
1749 1748 t = n
1750 1749 while True:
1751 1750 p = self.changelog.parents(n)
1752 1751 if p[1] != nullid or p[0] == nullid:
1753 1752 b.append((t, n, p[0], p[1]))
1754 1753 break
1755 1754 n = p[0]
1756 1755 return b
1757 1756
1758 1757 def between(self, pairs):
1759 1758 r = []
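        # For each (top, bottom) pair, walk first-parent links from top
        # toward bottom and record the nodes found at exponentially growing
        # distances (1, 2, 4, ...); this sampling is what the old 'between'
        # wire-protocol discovery command relies on.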
1760 1759
1761 1760 for top, bottom in pairs:
1762 1761 n, l, i = top, [], 0
1763 1762 f = 1
1764 1763
1765 1764 while n != bottom and n != nullid:
1766 1765 p = self.changelog.parents(n)[0]
1767 1766 if i == f:
1768 1767 l.append(n)
1769 1768 f = f * 2
1770 1769 n = p
1771 1770 i += 1
1772 1771
1773 1772 r.append(l)
1774 1773
1775 1774 return r
1776 1775
1777 1776 def pull(self, remote, heads=None, force=False):
1778 1777         # don't open a transaction for nothing or you break future useful
1779 1778         # rollback calls
1780 1779 tr = None
1781 1780 trname = 'pull\n' + util.hidepassword(remote.url())
1782 1781 lock = self.lock()
1783 1782 try:
1784 1783 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1785 1784 force=force)
1786 1785 common, fetch, rheads = tmp
1787 1786 if not fetch:
1788 1787 self.ui.status(_("no changes found\n"))
1789 1788 added = []
1790 1789 result = 0
1791 1790 else:
1792 1791 tr = self.transaction(trname)
1793 1792 if heads is None and list(common) == [nullid]:
1794 1793 self.ui.status(_("requesting all changes\n"))
1795 1794 elif heads is None and remote.capable('changegroupsubset'):
1796 1795 # issue1320, avoid a race if remote changed after discovery
1797 1796 heads = rheads
1798 1797
1799 1798 if remote.capable('getbundle'):
1800 1799 cg = remote.getbundle('pull', common=common,
1801 1800 heads=heads or rheads)
1802 1801 elif heads is None:
1803 1802 cg = remote.changegroup(fetch, 'pull')
1804 1803 elif not remote.capable('changegroupsubset'):
1805 1804 raise util.Abort(_("partial pull cannot be done because "
1806 1805 "other repository doesn't support "
1807 1806 "changegroupsubset."))
1808 1807 else:
1809 1808 cg = remote.changegroupsubset(fetch, heads, 'pull')
1810 1809 clstart = len(self.changelog)
1811 1810 result = self.addchangegroup(cg, 'pull', remote.url())
1812 1811 clend = len(self.changelog)
1813 1812 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1814 1813
1815 1814 # compute target subset
1816 1815 if heads is None:
1817 1816                 # We pulled everything possible
1818 1817 # sync on everything common
1819 1818 subset = common + added
1820 1819 else:
1821 1820 # We pulled a specific subset
1822 1821 # sync on this subset
1823 1822 subset = heads
1824 1823
1825 1824 # Get remote phases data from remote
1826 1825 remotephases = remote.listkeys('phases')
1827 1826 publishing = bool(remotephases.get('publishing', False))
1828 1827 if remotephases and not publishing:
1829 1828                 # remote is new and non-publishing
1830 1829 pheads, _dr = phases.analyzeremotephases(self, subset,
1831 1830 remotephases)
1832 1831 phases.advanceboundary(self, phases.public, pheads)
1833 1832 phases.advanceboundary(self, phases.draft, subset)
1834 1833 else:
1835 1834                 # Remote is old or publishing; all common changesets
1836 1835                 # should be seen as public
1837 1836 phases.advanceboundary(self, phases.public, subset)
1838 1837
1839 1838 if obsolete._enabled:
1840 1839 self.ui.debug('fetching remote obsolete markers\n')
1841 1840 remoteobs = remote.listkeys('obsolete')
1842 1841 if 'dump0' in remoteobs:
1843 1842 if tr is None:
1844 1843 tr = self.transaction(trname)
1845 1844 for key in sorted(remoteobs, reverse=True):
1846 1845 if key.startswith('dump'):
1847 1846 data = base85.b85decode(remoteobs[key])
1848 1847 self.obsstore.mergemarkers(tr, data)
1849 1848 self.invalidatevolatilesets()
1850 1849 if tr is not None:
1851 1850 tr.close()
1852 1851 finally:
1853 1852 if tr is not None:
1854 1853 tr.release()
1855 1854 lock.release()
1856 1855
1857 1856 return result
1858 1857
1859 1858 def checkpush(self, force, revs):
1860 1859 """Extensions can override this function if additional checks have
1861 1860 to be performed before pushing, or call it if they override push
1862 1861 command.
1863 1862 """
1864 1863 pass
1865 1864
1866 1865 def push(self, remote, force=False, revs=None, newbranch=False):
1867 1866 '''Push outgoing changesets (limited by revs) from the current
1868 1867 repository to remote. Return an integer:
1869 1868 - None means nothing to push
1870 1869 - 0 means HTTP error
1871 1870 - 1 means we pushed and remote head count is unchanged *or*
1872 1871 we have outgoing changesets but refused to push
1873 1872 - other values as described by addchangegroup()
1874 1873 '''
1875 1874 # there are two ways to push to remote repo:
1876 1875 #
1877 1876 # addchangegroup assumes local user can lock remote
1878 1877 # repo (local filesystem, old ssh servers).
1879 1878 #
1880 1879 # unbundle assumes local user cannot lock remote repo (new ssh
1881 1880 # servers, http servers).
1882 1881
1883 1882 if not remote.canpush():
1884 1883 raise util.Abort(_("destination does not support push"))
1885 1884 unfi = self.unfiltered()
1886 1885 # get local lock as we might write phase data
1887 1886 locallock = self.lock()
1888 1887 try:
1889 1888 self.checkpush(force, revs)
1890 1889 lock = None
1891 1890 unbundle = remote.capable('unbundle')
1892 1891 if not unbundle:
1893 1892 lock = remote.lock()
1894 1893 try:
1895 1894 # discovery
1896 1895 fci = discovery.findcommonincoming
1897 1896 commoninc = fci(unfi, remote, force=force)
1898 1897 common, inc, remoteheads = commoninc
1899 1898 fco = discovery.findcommonoutgoing
1900 1899 outgoing = fco(unfi, remote, onlyheads=revs,
1901 1900 commoninc=commoninc, force=force)
1902 1901
1903 1902
1904 1903 if not outgoing.missing:
1905 1904 # nothing to push
1906 1905 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1907 1906 ret = None
1908 1907 else:
1909 1908 # something to push
1910 1909 if not force:
1911 1910                     # if self.obsstore is empty --> no obsolete markers,
1912 1911                     # so we can skip the iteration below
1913 1912 if unfi.obsstore:
1914 1913                         # these messages are defined here to stay within the 80-char limit
1915 1914 mso = _("push includes obsolete changeset: %s!")
1916 1915 msu = _("push includes unstable changeset: %s!")
1917 1916 msb = _("push includes bumped changeset: %s!")
1918 1917 msd = _("push includes divergent changeset: %s!")
1919 1918                         # If we are pushing and there is at least one
1920 1919                         # obsolete or unstable changeset in missing, at
1921 1920                         # least one of the missing heads will be obsolete or
1922 1921                         # unstable. So checking heads only is ok
1923 1922 for node in outgoing.missingheads:
1924 1923 ctx = unfi[node]
1925 1924 if ctx.obsolete():
1926 1925 raise util.Abort(mso % ctx)
1927 1926 elif ctx.unstable():
1928 1927 raise util.Abort(msu % ctx)
1929 1928 elif ctx.bumped():
1930 1929 raise util.Abort(msb % ctx)
1931 1930 elif ctx.divergent():
1932 1931 raise util.Abort(msd % ctx)
1933 1932 discovery.checkheads(unfi, remote, outgoing,
1934 1933 remoteheads, newbranch,
1935 1934 bool(inc))
1936 1935
1937 1936 # create a changegroup from local
1938 1937 if revs is None and not outgoing.excluded:
1939 1938 # push everything,
1940 1939 # use the fast path, no race possible on push
1941 1940 cg = self._changegroup(outgoing.missing, 'push')
1942 1941 else:
1943 1942 cg = self.getlocalbundle('push', outgoing)
1944 1943
1945 1944 # apply changegroup to remote
1946 1945 if unbundle:
1947 1946 # local repo finds heads on server, finds out what
1948 1947 # revs it must push. once revs transferred, if server
1949 1948 # finds it has different heads (someone else won
1950 1949 # commit/push race), server aborts.
1951 1950 if force:
1952 1951 remoteheads = ['force']
1953 1952 # ssh: return remote's addchangegroup()
1954 1953 # http: return remote's addchangegroup() or 0 for error
1955 1954 ret = remote.unbundle(cg, remoteheads, 'push')
1956 1955 else:
1957 1956 # we return an integer indicating remote head count
1958 1957 # change
1959 1958 ret = remote.addchangegroup(cg, 'push', self.url())
1960 1959
1961 1960 if ret:
1962 1961                     # push succeeded, synchronize target of the push
1963 1962 cheads = outgoing.missingheads
1964 1963 elif revs is None:
1965 1964                     # All-out push failed. Synchronize all common changesets
1966 1965 cheads = outgoing.commonheads
1967 1966 else:
1968 1967 # I want cheads = heads(::missingheads and ::commonheads)
1969 1968 # (missingheads is revs with secret changeset filtered out)
1970 1969 #
1971 1970                     # This can be expressed as:
1972 1971                     #     cheads = ( (missingheads and ::commonheads)
1973 1972                     #              + (commonheads and ::missingheads) )
1975 1974 #
1976 1975 # while trying to push we already computed the following:
1977 1976 # common = (::commonheads)
1978 1977 # missing = ((commonheads::missingheads) - commonheads)
1979 1978 #
1980 1979 # We can pick:
1981 1980 # * missingheads part of common (::commonheads)
1982 1981 common = set(outgoing.common)
1983 1982 cheads = [node for node in revs if node in common]
1984 1983 # and
1985 1984                     # * commonheads that are parents of roots of missing
1986 1985 revset = unfi.set('%ln and parents(roots(%ln))',
1987 1986 outgoing.commonheads,
1988 1987 outgoing.missing)
1989 1988 cheads.extend(c.node() for c in revset)
1990 1989 # even when we don't push, exchanging phase data is useful
1991 1990 remotephases = remote.listkeys('phases')
1992 1991 if not remotephases: # old server or public only repo
1993 1992 phases.advanceboundary(self, phases.public, cheads)
1994 1993 # don't push any phase data as there is nothing to push
1995 1994 else:
1996 1995 ana = phases.analyzeremotephases(self, cheads, remotephases)
1997 1996 pheads, droots = ana
1998 1997 ### Apply remote phase on local
1999 1998 if remotephases.get('publishing', False):
2000 1999 phases.advanceboundary(self, phases.public, cheads)
2001 2000 else: # publish = False
2002 2001 phases.advanceboundary(self, phases.public, pheads)
2003 2002 phases.advanceboundary(self, phases.draft, cheads)
2004 2003 ### Apply local phase on remote
2005 2004
2006 2005                     # Get the list of all revs draft on remote but public here.
2007 2006                     # XXX Beware that the revset breaks if droots is not strictly
2008 2007                     # XXX roots; we may want to ensure it is, but that is costly
2009 2008 outdated = unfi.set('heads((%ln::%ln) and public())',
2010 2009 droots, cheads)
2011 2010 for newremotehead in outdated:
2012 2011 r = remote.pushkey('phases',
2013 2012 newremotehead.hex(),
2014 2013 str(phases.draft),
2015 2014 str(phases.public))
2016 2015 if not r:
2017 2016 self.ui.warn(_('updating %s to public failed!\n')
2018 2017 % newremotehead)
2019 2018 self.ui.debug('try to push obsolete markers to remote\n')
2020 2019 if (obsolete._enabled and self.obsstore and
2021 2020 'obsolete' in remote.listkeys('namespaces')):
2022 2021 rslts = []
2023 2022 remotedata = self.listkeys('obsolete')
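                    # despite its name, 'remotedata' holds the local obsolete
                    # pushkey data (from self.listkeys) that is about to be
                    # pushed to the remote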
2024 2023 for key in sorted(remotedata, reverse=True):
2025 2024 # reverse sort to ensure we end with dump0
2026 2025 data = remotedata[key]
2027 2026 rslts.append(remote.pushkey('obsolete', key, '', data))
2028 2027 if [r for r in rslts if not r]:
2029 2028 msg = _('failed to push some obsolete markers!\n')
2030 2029 self.ui.warn(msg)
2031 2030 finally:
2032 2031 if lock is not None:
2033 2032 lock.release()
2034 2033 finally:
2035 2034 locallock.release()
2036 2035
2037 2036 self.ui.debug("checking for updated bookmarks\n")
2038 2037 rb = remote.listkeys('bookmarks')
2039 2038 for k in rb.keys():
2040 2039 if k in unfi._bookmarks:
2041 2040 nr, nl = rb[k], hex(self._bookmarks[k])
2042 2041 if nr in unfi:
2043 2042 cr = unfi[nr]
2044 2043 cl = unfi[nl]
2045 2044 if bookmarks.validdest(unfi, cr, cl):
2046 2045 r = remote.pushkey('bookmarks', k, nr, nl)
2047 2046 if r:
2048 2047 self.ui.status(_("updating bookmark %s\n") % k)
2049 2048 else:
2050 2049 self.ui.warn(_('updating bookmark %s'
2051 2050 ' failed!\n') % k)
2052 2051
2053 2052 return ret
2054 2053
2055 2054 def changegroupinfo(self, nodes, source):
2056 2055 if self.ui.verbose or source == 'bundle':
2057 2056 self.ui.status(_("%d changesets found\n") % len(nodes))
2058 2057 if self.ui.debugflag:
2059 2058 self.ui.debug("list of changesets:\n")
2060 2059 for node in nodes:
2061 2060 self.ui.debug("%s\n" % hex(node))
2062 2061
2063 2062 def changegroupsubset(self, bases, heads, source):
2064 2063 """Compute a changegroup consisting of all the nodes that are
2065 2064 descendants of any of the bases and ancestors of any of the heads.
2066 2065 Return a chunkbuffer object whose read() method will return
2067 2066 successive changegroup chunks.
2068 2067
2069 2068 It is fairly complex as determining which filenodes and which
2070 2069 manifest nodes need to be included for the changeset to be complete
2071 2070 is non-trivial.
2072 2071
2073 2072 Another wrinkle is doing the reverse, figuring out which changeset in
2074 2073 the changegroup a particular filenode or manifestnode belongs to.
2075 2074 """
2076 2075 cl = self.changelog
2077 2076 if not bases:
2078 2077 bases = [nullid]
2079 2078 csets, bases, heads = cl.nodesbetween(bases, heads)
2080 2079 # We assume that all ancestors of bases are known
2081 2080 common = cl.ancestors([cl.rev(n) for n in bases])
2082 2081 return self._changegroupsubset(common, csets, heads, source)
2083 2082
2084 2083 def getlocalbundle(self, source, outgoing):
2085 2084 """Like getbundle, but taking a discovery.outgoing as an argument.
2086 2085
2087 2086 This is only implemented for local repos and reuses potentially
2088 2087 precomputed sets in outgoing."""
2089 2088 if not outgoing.missing:
2090 2089 return None
2091 2090 return self._changegroupsubset(outgoing.common,
2092 2091 outgoing.missing,
2093 2092 outgoing.missingheads,
2094 2093 source)
2095 2094
2096 2095 def getbundle(self, source, heads=None, common=None):
2097 2096 """Like changegroupsubset, but returns the set difference between the
2098 2097         ancestors of heads and the ancestors of common.
2099 2098
2100 2099 If heads is None, use the local heads. If common is None, use [nullid].
2101 2100
2102 2101 The nodes in common might not all be known locally due to the way the
2103 2102 current discovery protocol works.
2104 2103 """
2105 2104 cl = self.changelog
2106 2105 if common:
2107 2106 hasnode = cl.hasnode
2108 2107 common = [n for n in common if hasnode(n)]
2109 2108 else:
2110 2109 common = [nullid]
2111 2110 if not heads:
2112 2111 heads = cl.heads()
2113 2112 return self.getlocalbundle(source,
2114 2113 discovery.outgoing(cl, common, heads))
2115 2114
2116 2115 @unfilteredmethod
2117 2116 def _changegroupsubset(self, commonrevs, csets, heads, source):
2118 2117
2119 2118 cl = self.changelog
2120 2119 mf = self.manifest
2121 2120 mfs = {} # needed manifests
2122 2121 fnodes = {} # needed file nodes
2123 2122 changedfiles = set()
2124 2123 fstate = ['', {}]
2125 2124 count = [0, 0]
2126 2125
2127 2126 # can we go through the fast path ?
2128 2127 heads.sort()
2129 2128 if heads == sorted(self.heads()):
2130 2129 return self._changegroup(csets, source)
2131 2130
2132 2131 # slow path
2133 2132 self.hook('preoutgoing', throw=True, source=source)
2134 2133 self.changegroupinfo(csets, source)
2135 2134
2136 2135 # filter any nodes that claim to be part of the known set
2137 2136 def prune(revlog, missing):
2138 2137 rr, rl = revlog.rev, revlog.linkrev
2139 2138 return [n for n in missing
2140 2139 if rl(rr(n)) not in commonrevs]
2141 2140
2142 2141 progress = self.ui.progress
2143 2142 _bundling = _('bundling')
2144 2143 _changesets = _('changesets')
2145 2144 _manifests = _('manifests')
2146 2145 _files = _('files')
2147 2146
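        # 'lookup' is called back while each revlog is bundled: for the
        # changelog it records the changed files and needed manifests, for
        # the manifest it records needed file nodes and returns the linked
        # changelog node, and for filelogs it returns the linking changelog
        # node collected in fstate, updating progress as it goes.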
2148 2147 def lookup(revlog, x):
2149 2148 if revlog == cl:
2150 2149 c = cl.read(x)
2151 2150 changedfiles.update(c[3])
2152 2151 mfs.setdefault(c[0], x)
2153 2152 count[0] += 1
2154 2153 progress(_bundling, count[0],
2155 2154 unit=_changesets, total=count[1])
2156 2155 return x
2157 2156 elif revlog == mf:
2158 2157 clnode = mfs[x]
2159 2158 mdata = mf.readfast(x)
2160 2159 for f, n in mdata.iteritems():
2161 2160 if f in changedfiles:
2162 2161 fnodes[f].setdefault(n, clnode)
2163 2162 count[0] += 1
2164 2163 progress(_bundling, count[0],
2165 2164 unit=_manifests, total=count[1])
2166 2165 return clnode
2167 2166 else:
2168 2167 progress(_bundling, count[0], item=fstate[0],
2169 2168 unit=_files, total=count[1])
2170 2169 return fstate[1][x]
2171 2170
2172 2171 bundler = changegroup.bundle10(lookup)
2173 2172 reorder = self.ui.config('bundle', 'reorder', 'auto')
2174 2173 if reorder == 'auto':
2175 2174 reorder = None
2176 2175 else:
2177 2176 reorder = util.parsebool(reorder)
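        # the 'bundle.reorder' config: 'auto' is passed through as None,
        # anything else is parsed as a boolean and handed to the revlog
        # group generators as the 'reorder' flag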
2178 2177
2179 2178 def gengroup():
2180 2179 # Create a changenode group generator that will call our functions
2181 2180 # back to lookup the owning changenode and collect information.
2182 2181 count[:] = [0, len(csets)]
2183 2182 for chunk in cl.group(csets, bundler, reorder=reorder):
2184 2183 yield chunk
2185 2184 progress(_bundling, None)
2186 2185
2187 2186 # Create a generator for the manifestnodes that calls our lookup
2188 2187 # and data collection functions back.
2189 2188 for f in changedfiles:
2190 2189 fnodes[f] = {}
2191 2190 count[:] = [0, len(mfs)]
2192 2191 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2193 2192 yield chunk
2194 2193 progress(_bundling, None)
2195 2194
2196 2195 mfs.clear()
2197 2196
2198 2197 # Go through all our files in order sorted by name.
2199 2198 count[:] = [0, len(changedfiles)]
2200 2199 for fname in sorted(changedfiles):
2201 2200 filerevlog = self.file(fname)
2202 2201 if not len(filerevlog):
2203 2202 raise util.Abort(_("empty or missing revlog for %s")
2204 2203 % fname)
2205 2204 fstate[0] = fname
2206 2205 fstate[1] = fnodes.pop(fname, {})
2207 2206
2208 2207 nodelist = prune(filerevlog, fstate[1])
2209 2208 if nodelist:
2210 2209 count[0] += 1
2211 2210 yield bundler.fileheader(fname)
2212 2211 for chunk in filerevlog.group(nodelist, bundler, reorder):
2213 2212 yield chunk
2214 2213
2215 2214 # Signal that no more groups are left.
2216 2215 yield bundler.close()
2217 2216 progress(_bundling, None)
2218 2217
2219 2218 if csets:
2220 2219 self.hook('outgoing', node=hex(csets[0]), source=source)
2221 2220
2222 2221 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2223 2222
2224 2223 def changegroup(self, basenodes, source):
2225 2224 # to avoid a race we use changegroupsubset() (issue1320)
2226 2225 return self.changegroupsubset(basenodes, self.heads(), source)
2227 2226
2228 2227 @unfilteredmethod
2229 2228 def _changegroup(self, nodes, source):
2230 2229 """Compute the changegroup of all nodes that we have that a recipient
2231 2230 doesn't. Return a chunkbuffer object whose read() method will return
2232 2231 successive changegroup chunks.
2233 2232
2234 2233 This is much easier than the previous function as we can assume that
2235 2234 the recipient has any changenode we aren't sending them.
2236 2235
2237 2236 nodes is the set of nodes to send"""
2238 2237
2239 2238 cl = self.changelog
2240 2239 mf = self.manifest
2241 2240 mfs = {}
2242 2241 changedfiles = set()
2243 2242 fstate = ['']
2244 2243 count = [0, 0]
2245 2244
2246 2245 self.hook('preoutgoing', throw=True, source=source)
2247 2246 self.changegroupinfo(nodes, source)
2248 2247
2249 2248 revset = set([cl.rev(n) for n in nodes])
2250 2249
2251 2250 def gennodelst(log):
2252 2251 ln, llr = log.node, log.linkrev
2253 2252 return [ln(r) for r in log if llr(r) in revset]
2254 2253
2255 2254 progress = self.ui.progress
2256 2255 _bundling = _('bundling')
2257 2256 _changesets = _('changesets')
2258 2257 _manifests = _('manifests')
2259 2258 _files = _('files')
2260 2259
2261 2260 def lookup(revlog, x):
2262 2261 if revlog == cl:
2263 2262 c = cl.read(x)
2264 2263 changedfiles.update(c[3])
2265 2264 mfs.setdefault(c[0], x)
2266 2265 count[0] += 1
2267 2266 progress(_bundling, count[0],
2268 2267 unit=_changesets, total=count[1])
2269 2268 return x
2270 2269 elif revlog == mf:
2271 2270 count[0] += 1
2272 2271 progress(_bundling, count[0],
2273 2272 unit=_manifests, total=count[1])
2274 2273 return cl.node(revlog.linkrev(revlog.rev(x)))
2275 2274 else:
2276 2275 progress(_bundling, count[0], item=fstate[0],
2277 2276 total=count[1], unit=_files)
2278 2277 return cl.node(revlog.linkrev(revlog.rev(x)))
2279 2278
2280 2279 bundler = changegroup.bundle10(lookup)
2281 2280 reorder = self.ui.config('bundle', 'reorder', 'auto')
2282 2281 if reorder == 'auto':
2283 2282 reorder = None
2284 2283 else:
2285 2284 reorder = util.parsebool(reorder)
2286 2285
2287 2286 def gengroup():
2288 2287 '''yield a sequence of changegroup chunks (strings)'''
2289 2288 # construct a list of all changed files
2290 2289
2291 2290 count[:] = [0, len(nodes)]
2292 2291 for chunk in cl.group(nodes, bundler, reorder=reorder):
2293 2292 yield chunk
2294 2293 progress(_bundling, None)
2295 2294
2296 2295 count[:] = [0, len(mfs)]
2297 2296 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2298 2297 yield chunk
2299 2298 progress(_bundling, None)
2300 2299
2301 2300 count[:] = [0, len(changedfiles)]
2302 2301 for fname in sorted(changedfiles):
2303 2302 filerevlog = self.file(fname)
2304 2303 if not len(filerevlog):
2305 2304 raise util.Abort(_("empty or missing revlog for %s")
2306 2305 % fname)
2307 2306 fstate[0] = fname
2308 2307 nodelist = gennodelst(filerevlog)
2309 2308 if nodelist:
2310 2309 count[0] += 1
2311 2310 yield bundler.fileheader(fname)
2312 2311 for chunk in filerevlog.group(nodelist, bundler, reorder):
2313 2312 yield chunk
2314 2313 yield bundler.close()
2315 2314 progress(_bundling, None)
2316 2315
2317 2316 if nodes:
2318 2317 self.hook('outgoing', node=hex(nodes[0]), source=source)
2319 2318
2320 2319 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2321 2320
2322 2321 @unfilteredmethod
2323 2322 def addchangegroup(self, source, srctype, url, emptyok=False):
2324 2323 """Add the changegroup returned by source.read() to this repo.
2325 2324 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2326 2325 the URL of the repo where this changegroup is coming from.
2327 2326
2328 2327 Return an integer summarizing the change to this repo:
2329 2328 - nothing changed or no source: 0
2330 2329 - more heads than before: 1+added heads (2..n)
2331 2330 - fewer heads than before: -1-removed heads (-2..-n)
2332 2331 - number of heads stays the same: 1
2333 2332 """
2334 2333 def csmap(x):
2335 2334 self.ui.debug("add changeset %s\n" % short(x))
2336 2335 return len(cl)
2337 2336
2338 2337 def revmap(x):
2339 2338 return cl.rev(x)
2340 2339
2341 2340 if not source:
2342 2341 return 0
2343 2342
2344 2343 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2345 2344
2346 2345 changesets = files = revisions = 0
2347 2346 efiles = set()
2348 2347
2349 2348 # write changelog data to temp files so concurrent readers will not see
2350 2349         # an inconsistent view
2351 2350 cl = self.changelog
2352 2351 cl.delayupdate()
2353 2352 oldheads = cl.heads()
2354 2353
2355 2354 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2356 2355 try:
2357 2356 trp = weakref.proxy(tr)
2358 2357 # pull off the changeset group
2359 2358 self.ui.status(_("adding changesets\n"))
2360 2359 clstart = len(cl)
2361 2360 class prog(object):
2362 2361 step = _('changesets')
2363 2362 count = 1
2364 2363 ui = self.ui
2365 2364 total = None
2366 2365 def __call__(self):
2367 2366 self.ui.progress(self.step, self.count, unit=_('chunks'),
2368 2367 total=self.total)
2369 2368 self.count += 1
2370 2369 pr = prog()
2371 2370 source.callback = pr
2372 2371
2373 2372 source.changelogheader()
2374 2373 srccontent = cl.addgroup(source, csmap, trp)
2375 2374 if not (srccontent or emptyok):
2376 2375 raise util.Abort(_("received changelog group is empty"))
2377 2376 clend = len(cl)
2378 2377 changesets = clend - clstart
2379 2378 for c in xrange(clstart, clend):
2380 2379 efiles.update(self[c].files())
2381 2380 efiles = len(efiles)
2382 2381 self.ui.progress(_('changesets'), None)
2383 2382
2384 2383 # pull off the manifest group
2385 2384 self.ui.status(_("adding manifests\n"))
2386 2385 pr.step = _('manifests')
2387 2386 pr.count = 1
2388 2387 pr.total = changesets # manifests <= changesets
2389 2388 # no need to check for empty manifest group here:
2390 2389 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2391 2390 # no new manifest will be created and the manifest group will
2392 2391 # be empty during the pull
2393 2392 source.manifestheader()
2394 2393 self.manifest.addgroup(source, revmap, trp)
2395 2394 self.ui.progress(_('manifests'), None)
2396 2395
2397 2396 needfiles = {}
2398 2397 if self.ui.configbool('server', 'validate', default=False):
2399 2398 # validate incoming csets have their manifests
2400 2399 for cset in xrange(clstart, clend):
2401 2400 mfest = self.changelog.read(self.changelog.node(cset))[0]
2402 2401 mfest = self.manifest.readdelta(mfest)
2403 2402 # store file nodes we must see
2404 2403 for f, n in mfest.iteritems():
2405 2404 needfiles.setdefault(f, set()).add(n)
2406 2405
2407 2406 # process the files
2408 2407 self.ui.status(_("adding file changes\n"))
2409 2408 pr.step = _('files')
2410 2409 pr.count = 1
2411 2410 pr.total = efiles
2412 2411 source.callback = None
2413 2412
2414 2413 while True:
2415 2414 chunkdata = source.filelogheader()
2416 2415 if not chunkdata:
2417 2416 break
2418 2417 f = chunkdata["filename"]
2419 2418 self.ui.debug("adding %s revisions\n" % f)
2420 2419 pr()
2421 2420 fl = self.file(f)
2422 2421 o = len(fl)
2423 2422 if not fl.addgroup(source, revmap, trp):
2424 2423 raise util.Abort(_("received file revlog group is empty"))
2425 2424 revisions += len(fl) - o
2426 2425 files += 1
2427 2426 if f in needfiles:
2428 2427 needs = needfiles[f]
2429 2428 for new in xrange(o, len(fl)):
2430 2429 n = fl.node(new)
2431 2430 if n in needs:
2432 2431 needs.remove(n)
2433 2432 if not needs:
2434 2433 del needfiles[f]
2435 2434 self.ui.progress(_('files'), None)
2436 2435
2437 2436 for f, needs in needfiles.iteritems():
2438 2437 fl = self.file(f)
2439 2438 for n in needs:
2440 2439 try:
2441 2440 fl.rev(n)
2442 2441 except error.LookupError:
2443 2442 raise util.Abort(
2444 2443 _('missing file data for %s:%s - run hg verify') %
2445 2444 (f, hex(n)))
2446 2445
2447 2446 dh = 0
2448 2447 if oldheads:
2449 2448 heads = cl.heads()
2450 2449 dh = len(heads) - len(oldheads)
2451 2450 for h in heads:
2452 2451 if h not in oldheads and self[h].closesbranch():
2453 2452 dh -= 1
2454 2453 htext = ""
2455 2454 if dh:
2456 2455 htext = _(" (%+d heads)") % dh
2457 2456
2458 2457 self.ui.status(_("added %d changesets"
2459 2458 " with %d changes to %d files%s\n")
2460 2459 % (changesets, revisions, files, htext))
2461 2460 self.invalidatevolatilesets()
2462 2461
2463 2462 if changesets > 0:
2464 2463 p = lambda: cl.writepending() and self.root or ""
2465 2464 self.hook('pretxnchangegroup', throw=True,
2466 2465 node=hex(cl.node(clstart)), source=srctype,
2467 2466 url=url, pending=p)
2468 2467
2469 2468 added = [cl.node(r) for r in xrange(clstart, clend)]
2470 2469 publishing = self.ui.configbool('phases', 'publish', True)
2471 2470 if srctype == 'push':
2472 2471                 # Old servers can not push the boundary themselves.
2473 2472                 # New servers won't push the boundary if the changeset already
2474 2473                 # existed locally as secret
2475 2474 #
2476 2475                 # We should not use added here but the list of all changes in
2477 2476                 # the bundle
2478 2477 if publishing:
2479 2478 phases.advanceboundary(self, phases.public, srccontent)
2480 2479 else:
2481 2480 phases.advanceboundary(self, phases.draft, srccontent)
2482 2481 phases.retractboundary(self, phases.draft, added)
2483 2482 elif srctype != 'strip':
2484 2483                 # publishing only alters behavior during push
2485 2484 #
2486 2485 # strip should not touch boundary at all
2487 2486 phases.retractboundary(self, phases.draft, added)
2488 2487
2489 2488 # make changelog see real files again
2490 2489 cl.finalize(trp)
2491 2490
2492 2491 tr.close()
2493 2492
2494 2493 if changesets > 0:
2495 2494 self.updatebranchcache()
2496 2495 def runhooks():
2497 2496 # forcefully update the on-disk branch cache
2498 2497 self.ui.debug("updating the branch cache\n")
2499 2498 self.hook("changegroup", node=hex(cl.node(clstart)),
2500 2499 source=srctype, url=url)
2501 2500
2502 2501 for n in added:
2503 2502 self.hook("incoming", node=hex(n), source=srctype,
2504 2503 url=url)
2505 2504 self._afterlock(runhooks)
2506 2505
2507 2506 finally:
2508 2507 tr.release()
2509 2508 # never return 0 here:
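        # (0 is reserved for "nothing changed"; shifting dh by +/- 1 keeps
        # the sign of the head-count change while staying nonzero)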
2510 2509 if dh < 0:
2511 2510 return dh - 1
2512 2511 else:
2513 2512 return dh + 1
2514 2513
2515 2514 def stream_in(self, remote, requirements):
2516 2515 lock = self.lock()
2517 2516 try:
2518 2517 # Save remote branchmap. We will use it later
2519 2518 # to speed up branchcache creation
2520 2519 rbranchmap = None
2521 2520 if remote.capable("branchmap"):
2522 2521 rbranchmap = remote.branchmap()
2523 2522
2524 2523 fp = remote.stream_out()
2525 2524 l = fp.readline()
2526 2525 try:
2527 2526 resp = int(l)
2528 2527 except ValueError:
2529 2528 raise error.ResponseError(
2530 2529 _('unexpected response from remote server:'), l)
2531 2530 if resp == 1:
2532 2531 raise util.Abort(_('operation forbidden by server'))
2533 2532 elif resp == 2:
2534 2533 raise util.Abort(_('locking the remote repository failed'))
2535 2534 elif resp != 0:
2536 2535 raise util.Abort(_('the server sent an unknown error code'))
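            # stream format: one line "<total files> <total bytes>", then for
            # each file a line "<name>\0<size>" followed by exactly <size>
            # bytes of raw store data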
2537 2536 self.ui.status(_('streaming all changes\n'))
2538 2537 l = fp.readline()
2539 2538 try:
2540 2539 total_files, total_bytes = map(int, l.split(' ', 1))
2541 2540 except (ValueError, TypeError):
2542 2541 raise error.ResponseError(
2543 2542 _('unexpected response from remote server:'), l)
2544 2543 self.ui.status(_('%d files to transfer, %s of data\n') %
2545 2544 (total_files, util.bytecount(total_bytes)))
2546 2545 handled_bytes = 0
2547 2546 self.ui.progress(_('clone'), 0, total=total_bytes)
2548 2547 start = time.time()
2549 2548 for i in xrange(total_files):
2550 2549 # XXX doesn't support '\n' or '\r' in filenames
2551 2550 l = fp.readline()
2552 2551 try:
2553 2552 name, size = l.split('\0', 1)
2554 2553 size = int(size)
2555 2554 except (ValueError, TypeError):
2556 2555 raise error.ResponseError(
2557 2556 _('unexpected response from remote server:'), l)
2558 2557 if self.ui.debugflag:
2559 2558 self.ui.debug('adding %s (%s)\n' %
2560 2559 (name, util.bytecount(size)))
2561 2560 # for backwards compat, name was partially encoded
2562 2561 ofp = self.sopener(store.decodedir(name), 'w')
2563 2562 for chunk in util.filechunkiter(fp, limit=size):
2564 2563 handled_bytes += len(chunk)
2565 2564 self.ui.progress(_('clone'), handled_bytes,
2566 2565 total=total_bytes)
2567 2566 ofp.write(chunk)
2568 2567 ofp.close()
2569 2568 elapsed = time.time() - start
2570 2569 if elapsed <= 0:
2571 2570 elapsed = 0.001
2572 2571 self.ui.progress(_('clone'), None)
2573 2572 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2574 2573 (util.bytecount(total_bytes), elapsed,
2575 2574 util.bytecount(total_bytes / elapsed)))
2576 2575
2577 2576             # new requirements = old non-format requirements +
2578 2577             #                    new format-related requirements
2579 2578             #                    from the streamed-in repository
2580 2579 requirements.update(set(self.requirements) - self.supportedformats)
2581 2580 self._applyrequirements(requirements)
2582 2581 self._writerequirements()
2583 2582
2584 2583 if rbranchmap:
2585 2584 rbheads = []
2586 2585 for bheads in rbranchmap.itervalues():
2587 2586 rbheads.extend(bheads)
2588 2587
2589 2588 self.branchcache = rbranchmap
2590 2589 if rbheads:
2591 2590 rtiprev = max((int(self.changelog.rev(node))
2592 2591 for node in rbheads))
2593 2592 branchmap.write(self, self.branchcache,
2594 2593 self[rtiprev].node(), rtiprev)
2595 2594 self.invalidate()
2596 2595 return len(self.heads()) + 1
2597 2596 finally:
2598 2597 lock.release()
2599 2598
2600 2599 def clone(self, remote, heads=[], stream=False):
2601 2600 '''clone remote repository.
2602 2601
2603 2602 keyword arguments:
2604 2603 heads: list of revs to clone (forces use of pull)
2605 2604 stream: use streaming clone if possible'''
2606 2605
2607 2606 # now, all clients that can request uncompressed clones can
2608 2607 # read repo formats supported by all servers that can serve
2609 2608 # them.
2610 2609
2611 2610 # if revlog format changes, client will have to check version
2612 2611 # and format flags on "stream" capability, and use
2613 2612 # uncompressed only if compatible.
2614 2613
2615 2614 if not stream:
2616 2615 # if the server explicitly prefers to stream (for fast LANs)
2617 2616 stream = remote.capable('stream-preferred')
2618 2617
2619 2618 if stream and not heads:
2620 2619 # 'stream' means remote revlog format is revlogv1 only
2621 2620 if remote.capable('stream'):
2622 2621 return self.stream_in(remote, set(('revlogv1',)))
2623 2622 # otherwise, 'streamreqs' contains the remote revlog format
2624 2623 streamreqs = remote.capable('streamreqs')
2625 2624 if streamreqs:
2626 2625 streamreqs = set(streamreqs.split(','))
2627 2626 # if we support it, stream in and adjust our requirements
2628 2627 if not streamreqs - self.supportedformats:
2629 2628 return self.stream_in(remote, streamreqs)
2630 2629 return self.pull(remote, heads)
2631 2630
2632 2631 def pushkey(self, namespace, key, old, new):
2633 2632 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2634 2633 old=old, new=new)
2635 2634 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2636 2635 ret = pushkey.push(self, namespace, key, old, new)
2637 2636 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2638 2637 ret=ret)
2639 2638 return ret
2640 2639
2641 2640 def listkeys(self, namespace):
2642 2641 self.hook('prelistkeys', throw=True, namespace=namespace)
2643 2642 self.ui.debug('listing keys for "%s"\n' % namespace)
2644 2643 values = pushkey.list(self, namespace)
2645 2644 self.hook('listkeys', namespace=namespace, values=values)
2646 2645 return values
2647 2646
2648 2647 def debugwireargs(self, one, two, three=None, four=None, five=None):
2649 2648 '''used to test argument passing over the wire'''
2650 2649 return "%s %s %s %s %s" % (one, two, three, four, five)
2651 2650
2652 2651 def savecommitmessage(self, text):
2653 2652 fp = self.opener('last-message.txt', 'wb')
2654 2653 try:
2655 2654 fp.write(text)
2656 2655 finally:
2657 2656 fp.close()
2658 2657 return self.pathto(fp.name[len(self.root) + 1:])
2659 2658
2660 2659 # used to avoid circular references so destructors work
2661 2660 def aftertrans(files):
2662 2661 renamefiles = [tuple(t) for t in files]
2663 2662 def a():
2664 2663 for src, dest in renamefiles:
2665 2664 try:
2666 2665 util.rename(src, dest)
2667 2666 except OSError: # journal file does not yet exist
2668 2667 pass
2669 2668 return a
2670 2669
2671 2670 def undoname(fn):
2672 2671 base, name = os.path.split(fn)
2673 2672 assert name.startswith('journal')
2674 2673 return os.path.join(base, name.replace('journal', 'undo', 1))
2675 2674
2676 2675 def instance(ui, path, create):
2677 2676 return localrepository(ui, util.urllocalpath(path), create)
2678 2677
2679 2678 def islocal(path):
2680 2679 return True