branchmap: extract write logic from localrepo
Pierre-Yves David
r18117:526e7ec5 default
@@ -1,6 +1,20 @@
1 1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7
8 from node import hex
9 import encoding
10
11 def write(repo, branches, tip, tiprev):
12 try:
13 f = repo.opener("cache/branchheads", "w", atomictemp=True)
14 f.write("%s %s\n" % (hex(tip), tiprev))
15 for label, nodes in branches.iteritems():
16 for node in nodes:
17 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
18 f.close()
19 except (IOError, OSError):
20 pass
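The localrepo.py hunk below switches the former _writebranchcache method over to this new helper. A minimal usage sketch (not part of the changeset; it assumes `repo` is an open localrepository whose opener can create cache/branchheads):

    import branchmap
    # persist the branch heads cache; tip/tiprev mark the revision the cache is valid up to
    branchmap.write(repo, repo.branchmap(), repo.changelog.tip(), len(repo) - 1)

Write failures (IOError, OSError) are deliberately swallowed: the branchheads file is only a cache and can always be recomputed from the changelog.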
@@ -1,2724 +1,2713 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 import branchmap
18 19 propertycache = util.propertycache
19 20 filecache = scmutil.filecache
20 21
21 22 class repofilecache(filecache):
22 23 """All filecache usage on repo are done for logic that should be unfiltered
23 24 """
24 25
25 26 def __get__(self, repo, type=None):
26 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 28 def __set__(self, repo, value):
28 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 30 def __delete__(self, repo):
30 31 return super(repofilecache, self).__delete__(repo.unfiltered())
31 32
32 33 class storecache(repofilecache):
33 34 """filecache for files in the store"""
34 35 def join(self, obj, fname):
35 36 return obj.sjoin(fname)
36 37
37 38 class unfilteredpropertycache(propertycache):
38 39 """propertycache that apply to unfiltered repo only"""
39 40
40 41 def __get__(self, repo, type=None):
41 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 43
43 44 class filteredpropertycache(propertycache):
44 45 """propertycache that must take filtering in account"""
45 46
46 47 def cachevalue(self, obj, value):
47 48 object.__setattr__(obj, self.name, value)
48 49
49 50
50 51 def hasunfilteredcache(repo, name):
51 52 """check if an repo and a unfilteredproperty cached value for <name>"""
52 53 return name in vars(repo.unfiltered())
53 54
54 55 def unfilteredmethod(orig):
55 56 """decorate method that always need to be run on unfiltered version"""
56 57 def wrapper(repo, *args, **kwargs):
57 58 return orig(repo.unfiltered(), *args, **kwargs)
58 59 return wrapper
59 60
60 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 63
63 64 class localpeer(peer.peerrepository):
64 65 '''peer for a local repo; reflects only the most recent API'''
65 66
66 67 def __init__(self, repo, caps=MODERNCAPS):
67 68 peer.peerrepository.__init__(self)
68 69 self._repo = repo
69 70 self.ui = repo.ui
70 71 self._caps = repo._restrictcapabilities(caps)
71 72 self.requirements = repo.requirements
72 73 self.supportedformats = repo.supportedformats
73 74
74 75 def close(self):
75 76 self._repo.close()
76 77
77 78 def _capabilities(self):
78 79 return self._caps
79 80
80 81 def local(self):
81 82 return self._repo
82 83
83 84 def canpush(self):
84 85 return True
85 86
86 87 def url(self):
87 88 return self._repo.url()
88 89
89 90 def lookup(self, key):
90 91 return self._repo.lookup(key)
91 92
92 93 def branchmap(self):
93 94 return discovery.visiblebranchmap(self._repo)
94 95
95 96 def heads(self):
96 97 return discovery.visibleheads(self._repo)
97 98
98 99 def known(self, nodes):
99 100 return self._repo.known(nodes)
100 101
101 102 def getbundle(self, source, heads=None, common=None):
102 103 return self._repo.getbundle(source, heads=heads, common=common)
103 104
104 105 # TODO We might want to move the next two calls into legacypeer and add
105 106 # unbundle instead.
106 107
107 108 def lock(self):
108 109 return self._repo.lock()
109 110
110 111 def addchangegroup(self, cg, source, url):
111 112 return self._repo.addchangegroup(cg, source, url)
112 113
113 114 def pushkey(self, namespace, key, old, new):
114 115 return self._repo.pushkey(namespace, key, old, new)
115 116
116 117 def listkeys(self, namespace):
117 118 return self._repo.listkeys(namespace)
118 119
119 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 121 '''used to test argument passing over the wire'''
121 122 return "%s %s %s %s %s" % (one, two, three, four, five)
122 123
123 124 class locallegacypeer(localpeer):
124 125 '''peer extension which implements legacy methods too; used for tests with
125 126 restricted capabilities'''
126 127
127 128 def __init__(self, repo):
128 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 130
130 131 def branches(self, nodes):
131 132 return self._repo.branches(nodes)
132 133
133 134 def between(self, pairs):
134 135 return self._repo.between(pairs)
135 136
136 137 def changegroup(self, basenodes, source):
137 138 return self._repo.changegroup(basenodes, source)
138 139
139 140 def changegroupsubset(self, bases, heads, source):
140 141 return self._repo.changegroupsubset(bases, heads, source)
141 142
142 143 class localrepository(object):
143 144
144 145 supportedformats = set(('revlogv1', 'generaldelta'))
145 146 supported = supportedformats | set(('store', 'fncache', 'shared',
146 147 'dotencode'))
147 148 openerreqs = set(('revlogv1', 'generaldelta'))
148 149 requirements = ['revlogv1']
149 150
150 151 def _baserequirements(self, create):
151 152 return self.requirements[:]
152 153
153 154 def __init__(self, baseui, path=None, create=False):
154 155 self.wvfs = scmutil.vfs(path, expand=True)
155 156 self.wopener = self.wvfs
156 157 self.root = self.wvfs.base
157 158 self.path = self.wvfs.join(".hg")
158 159 self.origroot = path
159 160 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 161 self.vfs = scmutil.vfs(self.path)
161 162 self.opener = self.vfs
162 163 self.baseui = baseui
163 164 self.ui = baseui.copy()
164 165 # A list of callbacks to shape the phase if no data were found.
165 166 # Callbacks are in the form: func(repo, roots) --> processed root.
166 167 # This list is to be filled by extensions during repo setup
167 168 self._phasedefaults = []
168 169 try:
169 170 self.ui.readconfig(self.join("hgrc"), self.root)
170 171 extensions.loadall(self.ui)
171 172 except IOError:
172 173 pass
173 174
174 175 if not self.vfs.isdir():
175 176 if create:
176 177 if not self.wvfs.exists():
177 178 self.wvfs.makedirs()
178 179 self.vfs.makedir(notindexed=True)
179 180 requirements = self._baserequirements(create)
180 181 if self.ui.configbool('format', 'usestore', True):
181 182 self.vfs.mkdir("store")
182 183 requirements.append("store")
183 184 if self.ui.configbool('format', 'usefncache', True):
184 185 requirements.append("fncache")
185 186 if self.ui.configbool('format', 'dotencode', True):
186 187 requirements.append('dotencode')
187 188 # create an invalid changelog
188 189 self.vfs.append(
189 190 "00changelog.i",
190 191 '\0\0\0\2' # represents revlogv2
191 192 ' dummy changelog to prevent using the old repo layout'
192 193 )
193 194 if self.ui.configbool('format', 'generaldelta', False):
194 195 requirements.append("generaldelta")
195 196 requirements = set(requirements)
196 197 else:
197 198 raise error.RepoError(_("repository %s not found") % path)
198 199 elif create:
199 200 raise error.RepoError(_("repository %s already exists") % path)
200 201 else:
201 202 try:
202 203 requirements = scmutil.readrequires(self.vfs, self.supported)
203 204 except IOError, inst:
204 205 if inst.errno != errno.ENOENT:
205 206 raise
206 207 requirements = set()
207 208
208 209 self.sharedpath = self.path
209 210 try:
210 211 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 212 if not os.path.exists(s):
212 213 raise error.RepoError(
213 214 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 215 self.sharedpath = s
215 216 except IOError, inst:
216 217 if inst.errno != errno.ENOENT:
217 218 raise
218 219
219 220 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 221 self.spath = self.store.path
221 222 self.svfs = self.store.vfs
222 223 self.sopener = self.svfs
223 224 self.sjoin = self.store.join
224 225 self.vfs.createmode = self.store.createmode
225 226 self._applyrequirements(requirements)
226 227 if create:
227 228 self._writerequirements()
228 229
229 230
230 231 self._branchcache = None
231 232 self._branchcachetip = None
232 233 self.filterpats = {}
233 234 self._datafilters = {}
234 235 self._transref = self._lockref = self._wlockref = None
235 236
236 237 # A cache for various files under .hg/ that tracks file changes,
237 238 # (used by the filecache decorator)
238 239 #
239 240 # Maps a property name to its util.filecacheentry
240 241 self._filecache = {}
241 242
242 243 # hold sets of revision to be filtered
243 244 # should be cleared when something might have changed the filter value:
244 245 # - new changesets,
245 246 # - phase change,
246 247 # - new obsolescence marker,
247 248 # - working directory parent change,
248 249 # - bookmark changes
249 250 self.filteredrevcache = {}
250 251
251 252 def close(self):
252 253 pass
253 254
254 255 def _restrictcapabilities(self, caps):
255 256 return caps
256 257
257 258 def _applyrequirements(self, requirements):
258 259 self.requirements = requirements
259 260 self.sopener.options = dict((r, 1) for r in requirements
260 261 if r in self.openerreqs)
261 262
262 263 def _writerequirements(self):
263 264 reqfile = self.opener("requires", "w")
264 265 for r in self.requirements:
265 266 reqfile.write("%s\n" % r)
266 267 reqfile.close()
267 268
268 269 def _checknested(self, path):
269 270 """Determine if path is a legal nested repository."""
270 271 if not path.startswith(self.root):
271 272 return False
272 273 subpath = path[len(self.root) + 1:]
273 274 normsubpath = util.pconvert(subpath)
274 275
275 276 # XXX: Checking against the current working copy is wrong in
276 277 # the sense that it can reject things like
277 278 #
278 279 # $ hg cat -r 10 sub/x.txt
279 280 #
280 281 # if sub/ is no longer a subrepository in the working copy
281 282 # parent revision.
282 283 #
283 284 # However, it can of course also allow things that would have
284 285 # been rejected before, such as the above cat command if sub/
285 286 # is a subrepository now, but was a normal directory before.
286 287 # The old path auditor would have rejected by mistake since it
287 288 # panics when it sees sub/.hg/.
288 289 #
289 290 # All in all, checking against the working copy seems sensible
290 291 # since we want to prevent access to nested repositories on
291 292 # the filesystem *now*.
292 293 ctx = self[None]
293 294 parts = util.splitpath(subpath)
294 295 while parts:
295 296 prefix = '/'.join(parts)
296 297 if prefix in ctx.substate:
297 298 if prefix == normsubpath:
298 299 return True
299 300 else:
300 301 sub = ctx.sub(prefix)
301 302 return sub.checknested(subpath[len(prefix) + 1:])
302 303 else:
303 304 parts.pop()
304 305 return False
305 306
306 307 def peer(self):
307 308 return localpeer(self) # not cached to avoid reference cycle
308 309
309 310 def unfiltered(self):
310 311 """Return unfiltered version of the repository
311 312
312 313 Intended to be overwritten by filtered repo."""
313 314 return self
314 315
315 316 def filtered(self, name):
316 317 """Return a filtered version of a repository"""
317 318 # build a new class with the mixin and the current class
318 319 # (possibly a subclass of the repo)
319 320 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 321 pass
321 322 return proxycls(self, name)
322 323
323 324 @repofilecache('bookmarks')
324 325 def _bookmarks(self):
325 326 return bookmarks.bmstore(self)
326 327
327 328 @repofilecache('bookmarks.current')
328 329 def _bookmarkcurrent(self):
329 330 return bookmarks.readcurrent(self)
330 331
331 332 def bookmarkheads(self, bookmark):
332 333 name = bookmark.split('@', 1)[0]
333 334 heads = []
334 335 for mark, n in self._bookmarks.iteritems():
335 336 if mark.split('@', 1)[0] == name:
336 337 heads.append(n)
337 338 return heads
338 339
339 340 @storecache('phaseroots')
340 341 def _phasecache(self):
341 342 return phases.phasecache(self, self._phasedefaults)
342 343
343 344 @storecache('obsstore')
344 345 def obsstore(self):
345 346 store = obsolete.obsstore(self.sopener)
346 347 if store and not obsolete._enabled:
347 348 # message is rare enough to not be translated
348 349 msg = 'obsolete feature not enabled but %i markers found!\n'
349 350 self.ui.warn(msg % len(list(store)))
350 351 return store
351 352
352 353 @unfilteredpropertycache
353 354 def hiddenrevs(self):
354 355 """hiddenrevs: revs that should be hidden by command and tools
355 356
356 357 This set is carried on the repo to ease initialization and lazy
357 358 loading; it'll probably move back to changelog for efficiency and
358 359 consistency reasons.
359 360
360 361 Note that the hiddenrevs will need invalidation when
361 362 - a new changeset is added (possibly unstable above extinct)
362 363 - a new obsolete marker is added (possibly a new extinct changeset)
363 364
364 365 hidden changesets cannot have non-hidden descendants
365 366 """
366 367 hidden = set()
367 368 if self.obsstore:
368 369 ### hide extinct changeset that are not accessible by any mean
369 370 hiddenquery = 'extinct() - ::(. + bookmark())'
370 371 hidden.update(self.revs(hiddenquery))
371 372 return hidden
372 373
373 374 @storecache('00changelog.i')
374 375 def changelog(self):
375 376 c = changelog.changelog(self.sopener)
376 377 if 'HG_PENDING' in os.environ:
377 378 p = os.environ['HG_PENDING']
378 379 if p.startswith(self.root):
379 380 c.readpending('00changelog.i.a')
380 381 return c
381 382
382 383 @storecache('00manifest.i')
383 384 def manifest(self):
384 385 return manifest.manifest(self.sopener)
385 386
386 387 @repofilecache('dirstate')
387 388 def dirstate(self):
388 389 warned = [0]
389 390 def validate(node):
390 391 try:
391 392 self.changelog.rev(node)
392 393 return node
393 394 except error.LookupError:
394 395 if not warned[0]:
395 396 warned[0] = True
396 397 self.ui.warn(_("warning: ignoring unknown"
397 398 " working parent %s!\n") % short(node))
398 399 return nullid
399 400
400 401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 402
402 403 def __getitem__(self, changeid):
403 404 if changeid is None:
404 405 return context.workingctx(self)
405 406 return context.changectx(self, changeid)
406 407
407 408 def __contains__(self, changeid):
408 409 try:
409 410 return bool(self.lookup(changeid))
410 411 except error.RepoLookupError:
411 412 return False
412 413
413 414 def __nonzero__(self):
414 415 return True
415 416
416 417 def __len__(self):
417 418 return len(self.changelog)
418 419
419 420 def __iter__(self):
420 421 return iter(self.changelog)
421 422
422 423 def revs(self, expr, *args):
423 424 '''Return a list of revisions matching the given revset'''
424 425 expr = revset.formatspec(expr, *args)
425 426 m = revset.match(None, expr)
426 427 return [r for r in m(self, list(self))]
427 428
428 429 def set(self, expr, *args):
429 430 '''
430 431 Yield a context for each matching revision, after doing arg
431 432 replacement via revset.formatspec
432 433 '''
433 434 for r in self.revs(expr, *args):
434 435 yield self[r]
435 436
436 437 def url(self):
437 438 return 'file:' + self.root
438 439
439 440 def hook(self, name, throw=False, **args):
440 441 return hook.hook(self.ui, self, name, throw, **args)
441 442
442 443 @unfilteredmethod
443 444 def _tag(self, names, node, message, local, user, date, extra={}):
444 445 if isinstance(names, str):
445 446 names = (names,)
446 447
447 448 branches = self.branchmap()
448 449 for name in names:
449 450 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 451 local=local)
451 452 if name in branches:
452 453 self.ui.warn(_("warning: tag %s conflicts with existing"
453 454 " branch name\n") % name)
454 455
455 456 def writetags(fp, names, munge, prevtags):
456 457 fp.seek(0, 2)
457 458 if prevtags and prevtags[-1] != '\n':
458 459 fp.write('\n')
459 460 for name in names:
460 461 m = munge and munge(name) or name
461 462 if (self._tagscache.tagtypes and
462 463 name in self._tagscache.tagtypes):
463 464 old = self.tags().get(name, nullid)
464 465 fp.write('%s %s\n' % (hex(old), m))
465 466 fp.write('%s %s\n' % (hex(node), m))
466 467 fp.close()
467 468
468 469 prevtags = ''
469 470 if local:
470 471 try:
471 472 fp = self.opener('localtags', 'r+')
472 473 except IOError:
473 474 fp = self.opener('localtags', 'a')
474 475 else:
475 476 prevtags = fp.read()
476 477
477 478 # local tags are stored in the current charset
478 479 writetags(fp, names, None, prevtags)
479 480 for name in names:
480 481 self.hook('tag', node=hex(node), tag=name, local=local)
481 482 return
482 483
483 484 try:
484 485 fp = self.wfile('.hgtags', 'rb+')
485 486 except IOError, e:
486 487 if e.errno != errno.ENOENT:
487 488 raise
488 489 fp = self.wfile('.hgtags', 'ab')
489 490 else:
490 491 prevtags = fp.read()
491 492
492 493 # committed tags are stored in UTF-8
493 494 writetags(fp, names, encoding.fromlocal, prevtags)
494 495
495 496 fp.close()
496 497
497 498 self.invalidatecaches()
498 499
499 500 if '.hgtags' not in self.dirstate:
500 501 self[None].add(['.hgtags'])
501 502
502 503 m = matchmod.exact(self.root, '', ['.hgtags'])
503 504 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 505
505 506 for name in names:
506 507 self.hook('tag', node=hex(node), tag=name, local=local)
507 508
508 509 return tagnode
509 510
510 511 def tag(self, names, node, message, local, user, date):
511 512 '''tag a revision with one or more symbolic names.
512 513
513 514 names is a list of strings or, when adding a single tag, names may be a
514 515 string.
515 516
516 517 if local is True, the tags are stored in a per-repository file.
517 518 otherwise, they are stored in the .hgtags file, and a new
518 519 changeset is committed with the change.
519 520
520 521 keyword arguments:
521 522
522 523 local: whether to store tags in non-version-controlled file
523 524 (default False)
524 525
525 526 message: commit message to use if committing
526 527
527 528 user: name of user to use if committing
528 529
529 530 date: date tuple to use if committing'''
530 531
531 532 if not local:
532 533 for x in self.status()[:5]:
533 534 if '.hgtags' in x:
534 535 raise util.Abort(_('working copy of .hgtags is changed '
535 536 '(please commit .hgtags manually)'))
536 537
537 538 self.tags() # instantiate the cache
538 539 self._tag(names, node, message, local, user, date)
539 540
540 541 @filteredpropertycache
541 542 def _tagscache(self):
542 543 '''Returns a tagscache object that contains various tags related
543 544 caches.'''
544 545
545 546 # This simplifies its cache management by having one decorated
546 547 # function (this one) and the rest simply fetch things from it.
547 548 class tagscache(object):
548 549 def __init__(self):
549 550 # These two define the set of tags for this repository. tags
550 551 # maps tag name to node; tagtypes maps tag name to 'global' or
551 552 # 'local'. (Global tags are defined by .hgtags across all
552 553 # heads, and local tags are defined in .hg/localtags.)
553 554 # They constitute the in-memory cache of tags.
554 555 self.tags = self.tagtypes = None
555 556
556 557 self.nodetagscache = self.tagslist = None
557 558
558 559 cache = tagscache()
559 560 cache.tags, cache.tagtypes = self._findtags()
560 561
561 562 return cache
562 563
563 564 def tags(self):
564 565 '''return a mapping of tag to node'''
565 566 t = {}
566 567 if self.changelog.filteredrevs:
567 568 tags, tt = self._findtags()
568 569 else:
569 570 tags = self._tagscache.tags
570 571 for k, v in tags.iteritems():
571 572 try:
572 573 # ignore tags to unknown nodes
573 574 self.changelog.rev(v)
574 575 t[k] = v
575 576 except (error.LookupError, ValueError):
576 577 pass
577 578 return t
578 579
579 580 def _findtags(self):
580 581 '''Do the hard work of finding tags. Return a pair of dicts
581 582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 583 maps tag name to a string like \'global\' or \'local\'.
583 584 Subclasses or extensions are free to add their own tags, but
584 585 should be aware that the returned dicts will be retained for the
585 586 duration of the localrepo object.'''
586 587
587 588 # XXX what tagtype should subclasses/extensions use? Currently
588 589 # mq and bookmarks add tags, but do not set the tagtype at all.
589 590 # Should each extension invent its own tag type? Should there
590 591 # be one tagtype for all such "virtual" tags? Or is the status
591 592 # quo fine?
592 593
593 594 alltags = {} # map tag name to (node, hist)
594 595 tagtypes = {}
595 596
596 597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 599
599 600 # Build the return dicts. Have to re-encode tag names because
600 601 # the tags module always uses UTF-8 (in order not to lose info
601 602 # writing to the cache), but the rest of Mercurial wants them in
602 603 # local encoding.
603 604 tags = {}
604 605 for (name, (node, hist)) in alltags.iteritems():
605 606 if node != nullid:
606 607 tags[encoding.tolocal(name)] = node
607 608 tags['tip'] = self.changelog.tip()
608 609 tagtypes = dict([(encoding.tolocal(name), value)
609 610 for (name, value) in tagtypes.iteritems()])
610 611 return (tags, tagtypes)
611 612
612 613 def tagtype(self, tagname):
613 614 '''
614 615 return the type of the given tag. result can be:
615 616
616 617 'local' : a local tag
617 618 'global' : a global tag
618 619 None : tag does not exist
619 620 '''
620 621
621 622 return self._tagscache.tagtypes.get(tagname)
622 623
623 624 def tagslist(self):
624 625 '''return a list of tags ordered by revision'''
625 626 if not self._tagscache.tagslist:
626 627 l = []
627 628 for t, n in self.tags().iteritems():
628 629 r = self.changelog.rev(n)
629 630 l.append((r, t, n))
630 631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 632
632 633 return self._tagscache.tagslist
633 634
634 635 def nodetags(self, node):
635 636 '''return the tags associated with a node'''
636 637 if not self._tagscache.nodetagscache:
637 638 nodetagscache = {}
638 639 for t, n in self._tagscache.tags.iteritems():
639 640 nodetagscache.setdefault(n, []).append(t)
640 641 for tags in nodetagscache.itervalues():
641 642 tags.sort()
642 643 self._tagscache.nodetagscache = nodetagscache
643 644 return self._tagscache.nodetagscache.get(node, [])
644 645
645 646 def nodebookmarks(self, node):
646 647 marks = []
647 648 for bookmark, n in self._bookmarks.iteritems():
648 649 if n == node:
649 650 marks.append(bookmark)
650 651 return sorted(marks)
651 652
652 653 def _cacheabletip(self):
653 654 """tip-most revision stable enought to used in persistent cache
654 655
655 656 This function is overwritten by MQ to ensure we do not write cache for
656 657 a part of the history that will likely change.
657 658
658 659 Efficient handling of filtered revisions in branchcache should offer a
659 660 better alternative. But we are using this approach until it is ready.
660 661 """
661 662 cl = self.changelog
662 663 return cl.rev(cl.tip())
663 664
664 665 @unfilteredmethod # Until we get a smarter cache management
665 666 def updatebranchcache(self):
666 667 cl = self.changelog
667 668 tip = cl.tip()
668 669 if self._branchcache is not None and self._branchcachetip == tip:
669 670 return
670 671
671 672 oldtip = self._branchcachetip
672 673 if oldtip is None or oldtip not in cl.nodemap:
673 674 partial, last, lrev = self._readbranchcache()
674 675 else:
675 676 lrev = cl.rev(oldtip)
676 677 partial = self._branchcache
677 678
678 679 catip = self._cacheabletip()
679 680 # if lrev == catip: cache is already up to date
680 681 # if lrev > catip: we have uncachable elements in `partial` and can't
681 682 # write them to disk
682 683 if lrev < catip:
683 684 ctxgen = (self[r] for r in cl.revs(lrev + 1, catip))
684 685 self._updatebranchcache(partial, ctxgen)
685 self._writebranchcache(partial, cl.node(catip), catip)
686 branchmap.write(self, partial, cl.node(catip), catip)
686 687 lrev = catip
687 688 # If cacheable tip were lower than actual tip, we need to update the
688 689 # cache up to tip. This update (from cacheable to actual tip) is not
689 690 # written to disk since it's not cacheable.
690 691 tiprev = len(self) - 1
691 692 if lrev < tiprev:
692 693 ctxgen = (self[r] for r in cl.revs(lrev + 1, tiprev))
693 694 self._updatebranchcache(partial, ctxgen)
694 695 self._branchcache = partial
695 696 self._branchcachetip = tip
696 697
697 698 def branchmap(self):
698 699 '''returns a dictionary {branch: [branchheads]}'''
699 700 if self.changelog.filteredrevs:
700 701 # some changesets are excluded; we can't use the cache
701 702 branchmap = {}
702 703 self._updatebranchcache(branchmap, (self[r] for r in self))
703 704 return branchmap
704 705 else:
705 706 self.updatebranchcache()
706 707 return self._branchcache
707 708
708 709
709 710 def _branchtip(self, heads):
710 711 '''return the tipmost branch head in heads'''
711 712 tip = heads[-1]
712 713 for h in reversed(heads):
713 714 if not self[h].closesbranch():
714 715 tip = h
715 716 break
716 717 return tip
717 718
718 719 def branchtip(self, branch):
719 720 '''return the tip node for a given branch'''
720 721 if branch not in self.branchmap():
721 722 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
722 723 return self._branchtip(self.branchmap()[branch])
723 724
724 725 def branchtags(self):
725 726 '''return a dict where branch names map to the tipmost head of
726 727 the branch, open heads come before closed'''
727 728 bt = {}
728 729 for bn, heads in self.branchmap().iteritems():
729 730 bt[bn] = self._branchtip(heads)
730 731 return bt
731 732
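A consumer-side sketch of the two lookups above (not part of the changeset; assumes `repo` is a localrepository containing a 'default' branch):

    heads = repo.branchmap()           # {branch name: [branch head nodes]}
    default_heads = heads.get('default', [])
    tip = repo.branchtip('default')    # tipmost open head; RepoLookupError if the branch is unknown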
732 733 @unfilteredmethod # Until we get a smarter cache management
733 734 def _readbranchcache(self):
734 735 partial = {}
735 736 try:
736 737 f = self.opener("cache/branchheads")
737 738 lines = f.read().split('\n')
738 739 f.close()
739 740 except (IOError, OSError):
740 741 return {}, nullid, nullrev
741 742
742 743 try:
743 744 last, lrev = lines.pop(0).split(" ", 1)
744 745 last, lrev = bin(last), int(lrev)
745 746 if lrev >= len(self) or self[lrev].node() != last:
746 747 # invalidate the cache
747 748 raise ValueError('invalidating branch cache (tip differs)')
748 749 for l in lines:
749 750 if not l:
750 751 continue
751 752 node, label = l.split(" ", 1)
752 753 label = encoding.tolocal(label.strip())
753 754 if not node in self:
754 755 raise ValueError('invalidating branch cache because node '+
755 756 '%s does not exist' % node)
756 757 partial.setdefault(label, []).append(bin(node))
757 758 except KeyboardInterrupt:
758 759 raise
759 760 except Exception, inst:
760 761 if self.ui.debugflag:
761 762 self.ui.warn(str(inst), '\n')
762 763 partial, last, lrev = {}, nullid, nullrev
763 764 return partial, last, lrev
764 765
765 766 @unfilteredmethod # Until we get a smarter cache management
766 def _writebranchcache(self, branches, tip, tiprev):
767 try:
768 f = self.opener("cache/branchheads", "w", atomictemp=True)
769 f.write("%s %s\n" % (hex(tip), tiprev))
770 for label, nodes in branches.iteritems():
771 for node in nodes:
772 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
773 f.close()
774 except (IOError, OSError):
775 pass
776
777 @unfilteredmethod # Until we get a smarter cache management
778 767 def _updatebranchcache(self, partial, ctxgen):
779 768 """Given a branchhead cache, partial, that may have extra nodes or be
780 769 missing heads, and a generator of nodes that are at least a superset of
781 770 the missing heads, this function updates partial to be correct.
782 771 """
783 772 # collect new branch entries
784 773 newbranches = {}
785 774 for c in ctxgen:
786 775 newbranches.setdefault(c.branch(), []).append(c.node())
787 776 # if older branchheads are reachable from new ones, they aren't
788 777 # really branchheads. Note checking parents is insufficient:
789 778 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
790 779 for branch, newnodes in newbranches.iteritems():
791 780 bheads = partial.setdefault(branch, [])
792 781 # Remove candidate heads that no longer are in the repo (e.g., as
793 782 # the result of a strip that just happened). Avoid using 'node in
794 783 # self' here because that dives down into branchcache code somewhat
795 784 # recursively.
796 785 bheadrevs = [self.changelog.rev(node) for node in bheads
797 786 if self.changelog.hasnode(node)]
798 787 newheadrevs = [self.changelog.rev(node) for node in newnodes
799 788 if self.changelog.hasnode(node)]
800 789 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
801 790 # Remove duplicates - nodes that are in newheadrevs and are already
802 791 # in bheadrevs. This can happen if you strip a node whose parent
803 792 # was already a head (because they're on different branches).
804 793 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
805 794
806 795 # Starting from tip means fewer passes over reachable. If we know
807 796 # the new candidates are not ancestors of existing heads, we don't
808 797 # have to examine ancestors of existing heads
809 798 if ctxisnew:
810 799 iterrevs = sorted(newheadrevs)
811 800 else:
812 801 iterrevs = list(bheadrevs)
813 802
814 803 # This loop prunes out two kinds of heads - heads that are
815 804 # superseded by a head in newheadrevs, and newheadrevs that are not
816 805 # heads because an existing head is their descendant.
817 806 while iterrevs:
818 807 latest = iterrevs.pop()
819 808 if latest not in bheadrevs:
820 809 continue
821 810 ancestors = set(self.changelog.ancestors([latest],
822 811 bheadrevs[0]))
823 812 if ancestors:
824 813 bheadrevs = [b for b in bheadrevs if b not in ancestors]
825 814 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
826 815
827 816 # There may be branches that cease to exist when the last commit in the
828 817 # branch was stripped. This code filters them out. Note that the
829 818 # branch that ceased to exist may not be in newbranches because
830 819 # newbranches is the set of candidate heads, which when you strip the
831 820 # last commit in a branch will be the parent branch.
832 821 for branch in partial.keys():
833 822 nodes = [head for head in partial[branch]
834 823 if self.changelog.hasnode(head)]
835 824 if not nodes:
836 825 del partial[branch]
837 826
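This is the same rebuild path branchmap() takes above when changesets are filtered; a sketch of that call sequence (assumes `repo` is a localrepository):

    partial = {}
    ctxgen = (repo[r] for r in repo)   # a changectx for every revision
    repo._updatebranchcache(partial, ctxgen)
    # partial now maps each branch name to the list of its head nodes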
838 827 def lookup(self, key):
839 828 return self[key].node()
840 829
841 830 def lookupbranch(self, key, remote=None):
842 831 repo = remote or self
843 832 if key in repo.branchmap():
844 833 return key
845 834
846 835 repo = (remote and remote.local()) and remote or self
847 836 return repo[key].branch()
848 837
849 838 def known(self, nodes):
850 839 nm = self.changelog.nodemap
851 840 pc = self._phasecache
852 841 result = []
853 842 for n in nodes:
854 843 r = nm.get(n)
855 844 resp = not (r is None or pc.phase(self, r) >= phases.secret)
856 845 result.append(resp)
857 846 return result
858 847
859 848 def local(self):
860 849 return self
861 850
862 851 def cancopy(self):
863 852 return self.local() # so statichttprepo's override of local() works
864 853
865 854 def join(self, f):
866 855 return os.path.join(self.path, f)
867 856
868 857 def wjoin(self, f):
869 858 return os.path.join(self.root, f)
870 859
871 860 def file(self, f):
872 861 if f[0] == '/':
873 862 f = f[1:]
874 863 return filelog.filelog(self.sopener, f)
875 864
876 865 def changectx(self, changeid):
877 866 return self[changeid]
878 867
879 868 def parents(self, changeid=None):
880 869 '''get list of changectxs for parents of changeid'''
881 870 return self[changeid].parents()
882 871
883 872 def setparents(self, p1, p2=nullid):
884 873 copies = self.dirstate.setparents(p1, p2)
885 874 if copies:
886 875 # Adjust copy records, the dirstate cannot do it, it
887 876 # requires access to parents manifests. Preserve them
888 877 # only for entries added to first parent.
889 878 pctx = self[p1]
890 879 for f in copies:
891 880 if f not in pctx and copies[f] in pctx:
892 881 self.dirstate.copy(copies[f], f)
893 882
894 883 def filectx(self, path, changeid=None, fileid=None):
895 884 """changeid can be a changeset revision, node, or tag.
896 885 fileid can be a file revision or node."""
897 886 return context.filectx(self, path, changeid, fileid)
898 887
899 888 def getcwd(self):
900 889 return self.dirstate.getcwd()
901 890
902 891 def pathto(self, f, cwd=None):
903 892 return self.dirstate.pathto(f, cwd)
904 893
905 894 def wfile(self, f, mode='r'):
906 895 return self.wopener(f, mode)
907 896
908 897 def _link(self, f):
909 898 return os.path.islink(self.wjoin(f))
910 899
911 900 def _loadfilter(self, filter):
912 901 if filter not in self.filterpats:
913 902 l = []
914 903 for pat, cmd in self.ui.configitems(filter):
915 904 if cmd == '!':
916 905 continue
917 906 mf = matchmod.match(self.root, '', [pat])
918 907 fn = None
919 908 params = cmd
920 909 for name, filterfn in self._datafilters.iteritems():
921 910 if cmd.startswith(name):
922 911 fn = filterfn
923 912 params = cmd[len(name):].lstrip()
924 913 break
925 914 if not fn:
926 915 fn = lambda s, c, **kwargs: util.filter(s, c)
927 916 # Wrap old filters not supporting keyword arguments
928 917 if not inspect.getargspec(fn)[2]:
929 918 oldfn = fn
930 919 fn = lambda s, c, **kwargs: oldfn(s, c)
931 920 l.append((mf, fn, params))
932 921 self.filterpats[filter] = l
933 922 return self.filterpats[filter]
934 923
935 924 def _filter(self, filterpats, filename, data):
936 925 for mf, fn, cmd in filterpats:
937 926 if mf(filename):
938 927 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
939 928 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
940 929 break
941 930
942 931 return data
943 932
944 933 @unfilteredpropertycache
945 934 def _encodefilterpats(self):
946 935 return self._loadfilter('encode')
947 936
948 937 @unfilteredpropertycache
949 938 def _decodefilterpats(self):
950 939 return self._loadfilter('decode')
951 940
952 941 def adddatafilter(self, name, filter):
953 942 self._datafilters[name] = filter
954 943
955 944 def wread(self, filename):
956 945 if self._link(filename):
957 946 data = os.readlink(self.wjoin(filename))
958 947 else:
959 948 data = self.wopener.read(filename)
960 949 return self._filter(self._encodefilterpats, filename, data)
961 950
962 951 def wwrite(self, filename, data, flags):
963 952 data = self._filter(self._decodefilterpats, filename, data)
964 953 if 'l' in flags:
965 954 self.wopener.symlink(data, filename)
966 955 else:
967 956 self.wopener.write(filename, data)
968 957 if 'x' in flags:
969 958 util.setflags(self.wjoin(filename), False, True)
970 959
971 960 def wwritedata(self, filename, data):
972 961 return self._filter(self._decodefilterpats, filename, data)
973 962
974 963 def transaction(self, desc):
975 964 tr = self._transref and self._transref() or None
976 965 if tr and tr.running():
977 966 return tr.nest()
978 967
979 968 # abort here if the journal already exists
980 969 if os.path.exists(self.sjoin("journal")):
981 970 raise error.RepoError(
982 971 _("abandoned transaction found - run hg recover"))
983 972
984 973 self._writejournal(desc)
985 974 renames = [(x, undoname(x)) for x in self._journalfiles()]
986 975
987 976 tr = transaction.transaction(self.ui.warn, self.sopener,
988 977 self.sjoin("journal"),
989 978 aftertrans(renames),
990 979 self.store.createmode)
991 980 self._transref = weakref.ref(tr)
992 981 return tr
993 982
994 983 def _journalfiles(self):
995 984 return (self.sjoin('journal'), self.join('journal.dirstate'),
996 985 self.join('journal.branch'), self.join('journal.desc'),
997 986 self.join('journal.bookmarks'),
998 987 self.sjoin('journal.phaseroots'))
999 988
1000 989 def undofiles(self):
1001 990 return [undoname(x) for x in self._journalfiles()]
1002 991
1003 992 def _writejournal(self, desc):
1004 993 self.opener.write("journal.dirstate",
1005 994 self.opener.tryread("dirstate"))
1006 995 self.opener.write("journal.branch",
1007 996 encoding.fromlocal(self.dirstate.branch()))
1008 997 self.opener.write("journal.desc",
1009 998 "%d\n%s\n" % (len(self), desc))
1010 999 self.opener.write("journal.bookmarks",
1011 1000 self.opener.tryread("bookmarks"))
1012 1001 self.sopener.write("journal.phaseroots",
1013 1002 self.sopener.tryread("phaseroots"))
1014 1003
1015 1004 def recover(self):
1016 1005 lock = self.lock()
1017 1006 try:
1018 1007 if os.path.exists(self.sjoin("journal")):
1019 1008 self.ui.status(_("rolling back interrupted transaction\n"))
1020 1009 transaction.rollback(self.sopener, self.sjoin("journal"),
1021 1010 self.ui.warn)
1022 1011 self.invalidate()
1023 1012 return True
1024 1013 else:
1025 1014 self.ui.warn(_("no interrupted transaction available\n"))
1026 1015 return False
1027 1016 finally:
1028 1017 lock.release()
1029 1018
1030 1019 def rollback(self, dryrun=False, force=False):
1031 1020 wlock = lock = None
1032 1021 try:
1033 1022 wlock = self.wlock()
1034 1023 lock = self.lock()
1035 1024 if os.path.exists(self.sjoin("undo")):
1036 1025 return self._rollback(dryrun, force)
1037 1026 else:
1038 1027 self.ui.warn(_("no rollback information available\n"))
1039 1028 return 1
1040 1029 finally:
1041 1030 release(lock, wlock)
1042 1031
1043 1032 @unfilteredmethod # Until we get smarter cache management
1044 1033 def _rollback(self, dryrun, force):
1045 1034 ui = self.ui
1046 1035 try:
1047 1036 args = self.opener.read('undo.desc').splitlines()
1048 1037 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1049 1038 if len(args) >= 3:
1050 1039 detail = args[2]
1051 1040 oldtip = oldlen - 1
1052 1041
1053 1042 if detail and ui.verbose:
1054 1043 msg = (_('repository tip rolled back to revision %s'
1055 1044 ' (undo %s: %s)\n')
1056 1045 % (oldtip, desc, detail))
1057 1046 else:
1058 1047 msg = (_('repository tip rolled back to revision %s'
1059 1048 ' (undo %s)\n')
1060 1049 % (oldtip, desc))
1061 1050 except IOError:
1062 1051 msg = _('rolling back unknown transaction\n')
1063 1052 desc = None
1064 1053
1065 1054 if not force and self['.'] != self['tip'] and desc == 'commit':
1066 1055 raise util.Abort(
1067 1056 _('rollback of last commit while not checked out '
1068 1057 'may lose data'), hint=_('use -f to force'))
1069 1058
1070 1059 ui.status(msg)
1071 1060 if dryrun:
1072 1061 return 0
1073 1062
1074 1063 parents = self.dirstate.parents()
1075 1064 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1076 1065 if os.path.exists(self.join('undo.bookmarks')):
1077 1066 util.rename(self.join('undo.bookmarks'),
1078 1067 self.join('bookmarks'))
1079 1068 if os.path.exists(self.sjoin('undo.phaseroots')):
1080 1069 util.rename(self.sjoin('undo.phaseroots'),
1081 1070 self.sjoin('phaseroots'))
1082 1071 self.invalidate()
1083 1072
1084 1073 # Discard all cache entries to force reloading everything.
1085 1074 self._filecache.clear()
1086 1075
1087 1076 parentgone = (parents[0] not in self.changelog.nodemap or
1088 1077 parents[1] not in self.changelog.nodemap)
1089 1078 if parentgone:
1090 1079 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1091 1080 try:
1092 1081 branch = self.opener.read('undo.branch')
1093 1082 self.dirstate.setbranch(encoding.tolocal(branch))
1094 1083 except IOError:
1095 1084 ui.warn(_('named branch could not be reset: '
1096 1085 'current branch is still \'%s\'\n')
1097 1086 % self.dirstate.branch())
1098 1087
1099 1088 self.dirstate.invalidate()
1100 1089 parents = tuple([p.rev() for p in self.parents()])
1101 1090 if len(parents) > 1:
1102 1091 ui.status(_('working directory now based on '
1103 1092 'revisions %d and %d\n') % parents)
1104 1093 else:
1105 1094 ui.status(_('working directory now based on '
1106 1095 'revision %d\n') % parents)
1107 1096 # TODO: if we know which new heads may result from this rollback, pass
1108 1097 # them to destroy(), which will prevent the branchhead cache from being
1109 1098 # invalidated.
1110 1099 self.destroyed()
1111 1100 return 0
1112 1101
1113 1102 def invalidatecaches(self):
1114 1103
1115 1104 if '_tagscache' in vars(self):
1116 1105 # can't use delattr on proxy
1117 1106 del self.__dict__['_tagscache']
1118 1107
1119 1108 self.unfiltered()._branchcache = None # in UTF-8
1120 1109 self.unfiltered()._branchcachetip = None
1121 1110 self.invalidatevolatilesets()
1122 1111
1123 1112 def invalidatevolatilesets(self):
1124 1113 self.filteredrevcache.clear()
1125 1114 obsolete.clearobscaches(self)
1126 1115 if 'hiddenrevs' in vars(self):
1127 1116 del self.hiddenrevs
1128 1117
1129 1118 def invalidatedirstate(self):
1130 1119 '''Invalidates the dirstate, causing the next call to dirstate
1131 1120 to check if it was modified since the last time it was read,
1132 1121 rereading it if it has.
1133 1122
1134 1123 This is different from dirstate.invalidate() in that it doesn't always
1135 1124 reread the dirstate. Use dirstate.invalidate() if you want to
1136 1125 explicitly read the dirstate again (i.e. restoring it to a previous
1137 1126 known good state).'''
1138 1127 if hasunfilteredcache(self, 'dirstate'):
1139 1128 for k in self.dirstate._filecache:
1140 1129 try:
1141 1130 delattr(self.dirstate, k)
1142 1131 except AttributeError:
1143 1132 pass
1144 1133 delattr(self.unfiltered(), 'dirstate')
1145 1134
1146 1135 def invalidate(self):
1147 1136 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1148 1137 for k in self._filecache:
1149 1138 # dirstate is invalidated separately in invalidatedirstate()
1150 1139 if k == 'dirstate':
1151 1140 continue
1152 1141
1153 1142 try:
1154 1143 delattr(unfiltered, k)
1155 1144 except AttributeError:
1156 1145 pass
1157 1146 self.invalidatecaches()
1158 1147
1159 1148 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1160 1149 try:
1161 1150 l = lock.lock(lockname, 0, releasefn, desc=desc)
1162 1151 except error.LockHeld, inst:
1163 1152 if not wait:
1164 1153 raise
1165 1154 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1166 1155 (desc, inst.locker))
1167 1156 # default to 600 seconds timeout
1168 1157 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1169 1158 releasefn, desc=desc)
1170 1159 if acquirefn:
1171 1160 acquirefn()
1172 1161 return l
1173 1162
1174 1163 def _afterlock(self, callback):
1175 1164 """add a callback to the current repository lock.
1176 1165
1177 1166 The callback will be executed on lock release."""
1178 1167 l = self._lockref and self._lockref()
1179 1168 if l:
1180 1169 l.postrelease.append(callback)
1181 1170 else:
1182 1171 callback()
1183 1172
1184 1173 def lock(self, wait=True):
1185 1174 '''Lock the repository store (.hg/store) and return a weak reference
1186 1175 to the lock. Use this before modifying the store (e.g. committing or
1187 1176 stripping). If you are opening a transaction, get a lock as well.'''
1188 1177 l = self._lockref and self._lockref()
1189 1178 if l is not None and l.held:
1190 1179 l.lock()
1191 1180 return l
1192 1181
1193 1182 def unlock():
1194 1183 self.store.write()
1195 1184 if hasunfilteredcache(self, '_phasecache'):
1196 1185 self._phasecache.write()
1197 1186 for k, ce in self._filecache.items():
1198 1187 if k == 'dirstate':
1199 1188 continue
1200 1189 ce.refresh()
1201 1190
1202 1191 l = self._lock(self.sjoin("lock"), wait, unlock,
1203 1192 self.invalidate, _('repository %s') % self.origroot)
1204 1193 self._lockref = weakref.ref(l)
1205 1194 return l
1206 1195
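Callers pair the returned lock with release() in a try/finally block, as recover() and rollback() do above. A minimal sketch (assumes `repo` is a localrepository):

    lock = repo.lock()
    try:
        pass  # modify the store here (e.g. commit, strip, addchangegroup)
    finally:
        lock.release()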
1207 1196 def wlock(self, wait=True):
1208 1197 '''Lock the non-store parts of the repository (everything under
1209 1198 .hg except .hg/store) and return a weak reference to the lock.
1210 1199 Use this before modifying files in .hg.'''
1211 1200 l = self._wlockref and self._wlockref()
1212 1201 if l is not None and l.held:
1213 1202 l.lock()
1214 1203 return l
1215 1204
1216 1205 def unlock():
1217 1206 self.dirstate.write()
1218 1207 ce = self._filecache.get('dirstate')
1219 1208 if ce:
1220 1209 ce.refresh()
1221 1210
1222 1211 l = self._lock(self.join("wlock"), wait, unlock,
1223 1212 self.invalidatedirstate, _('working directory of %s') %
1224 1213 self.origroot)
1225 1214 self._wlockref = weakref.ref(l)
1226 1215 return l
1227 1216
1228 1217 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1229 1218 """
1230 1219 commit an individual file as part of a larger transaction
1231 1220 """
1232 1221
1233 1222 fname = fctx.path()
1234 1223 text = fctx.data()
1235 1224 flog = self.file(fname)
1236 1225 fparent1 = manifest1.get(fname, nullid)
1237 1226 fparent2 = fparent2o = manifest2.get(fname, nullid)
1238 1227
1239 1228 meta = {}
1240 1229 copy = fctx.renamed()
1241 1230 if copy and copy[0] != fname:
1242 1231 # Mark the new revision of this file as a copy of another
1243 1232 # file. This copy data will effectively act as a parent
1244 1233 # of this new revision. If this is a merge, the first
1245 1234 # parent will be the nullid (meaning "look up the copy data")
1246 1235 # and the second one will be the other parent. For example:
1247 1236 #
1248 1237 # 0 --- 1 --- 3 rev1 changes file foo
1249 1238 # \ / rev2 renames foo to bar and changes it
1250 1239 # \- 2 -/ rev3 should have bar with all changes and
1251 1240 # should record that bar descends from
1252 1241 # bar in rev2 and foo in rev1
1253 1242 #
1254 1243 # this allows this merge to succeed:
1255 1244 #
1256 1245 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1257 1246 # \ / merging rev3 and rev4 should use bar@rev2
1258 1247 # \- 2 --- 4 as the merge base
1259 1248 #
1260 1249
1261 1250 cfname = copy[0]
1262 1251 crev = manifest1.get(cfname)
1263 1252 newfparent = fparent2
1264 1253
1265 1254 if manifest2: # branch merge
1266 1255 if fparent2 == nullid or crev is None: # copied on remote side
1267 1256 if cfname in manifest2:
1268 1257 crev = manifest2[cfname]
1269 1258 newfparent = fparent1
1270 1259
1271 1260 # find source in nearest ancestor if we've lost track
1272 1261 if not crev:
1273 1262 self.ui.debug(" %s: searching for copy revision for %s\n" %
1274 1263 (fname, cfname))
1275 1264 for ancestor in self[None].ancestors():
1276 1265 if cfname in ancestor:
1277 1266 crev = ancestor[cfname].filenode()
1278 1267 break
1279 1268
1280 1269 if crev:
1281 1270 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1282 1271 meta["copy"] = cfname
1283 1272 meta["copyrev"] = hex(crev)
1284 1273 fparent1, fparent2 = nullid, newfparent
1285 1274 else:
1286 1275 self.ui.warn(_("warning: can't find ancestor for '%s' "
1287 1276 "copied from '%s'!\n") % (fname, cfname))
1288 1277
1289 1278 elif fparent2 != nullid:
1290 1279 # is one parent an ancestor of the other?
1291 1280 fparentancestor = flog.ancestor(fparent1, fparent2)
1292 1281 if fparentancestor == fparent1:
1293 1282 fparent1, fparent2 = fparent2, nullid
1294 1283 elif fparentancestor == fparent2:
1295 1284 fparent2 = nullid
1296 1285
1297 1286 # is the file changed?
1298 1287 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1299 1288 changelist.append(fname)
1300 1289 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1301 1290
1302 1291 # are just the flags changed during merge?
1303 1292 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1304 1293 changelist.append(fname)
1305 1294
1306 1295 return fparent1
1307 1296
1308 1297 @unfilteredmethod
1309 1298 def commit(self, text="", user=None, date=None, match=None, force=False,
1310 1299 editor=False, extra={}):
1311 1300 """Add a new revision to current repository.
1312 1301
1313 1302 Revision information is gathered from the working directory,
1314 1303 match can be used to filter the committed files. If editor is
1315 1304 supplied, it is called to get a commit message.
1316 1305 """
1317 1306
1318 1307 def fail(f, msg):
1319 1308 raise util.Abort('%s: %s' % (f, msg))
1320 1309
1321 1310 if not match:
1322 1311 match = matchmod.always(self.root, '')
1323 1312
1324 1313 if not force:
1325 1314 vdirs = []
1326 1315 match.dir = vdirs.append
1327 1316 match.bad = fail
1328 1317
1329 1318 wlock = self.wlock()
1330 1319 try:
1331 1320 wctx = self[None]
1332 1321 merge = len(wctx.parents()) > 1
1333 1322
1334 1323 if (not force and merge and match and
1335 1324 (match.files() or match.anypats())):
1336 1325 raise util.Abort(_('cannot partially commit a merge '
1337 1326 '(do not specify files or patterns)'))
1338 1327
1339 1328 changes = self.status(match=match, clean=force)
1340 1329 if force:
1341 1330 changes[0].extend(changes[6]) # mq may commit unchanged files
1342 1331
1343 1332 # check subrepos
1344 1333 subs = []
1345 1334 commitsubs = set()
1346 1335 newstate = wctx.substate.copy()
1347 1336 # only manage subrepos and .hgsubstate if .hgsub is present
1348 1337 if '.hgsub' in wctx:
1349 1338 # we'll decide whether to track this ourselves, thanks
1350 1339 if '.hgsubstate' in changes[0]:
1351 1340 changes[0].remove('.hgsubstate')
1352 1341 if '.hgsubstate' in changes[2]:
1353 1342 changes[2].remove('.hgsubstate')
1354 1343
1355 1344 # compare current state to last committed state
1356 1345 # build new substate based on last committed state
1357 1346 oldstate = wctx.p1().substate
1358 1347 for s in sorted(newstate.keys()):
1359 1348 if not match(s):
1360 1349 # ignore working copy, use old state if present
1361 1350 if s in oldstate:
1362 1351 newstate[s] = oldstate[s]
1363 1352 continue
1364 1353 if not force:
1365 1354 raise util.Abort(
1366 1355 _("commit with new subrepo %s excluded") % s)
1367 1356 if wctx.sub(s).dirty(True):
1368 1357 if not self.ui.configbool('ui', 'commitsubrepos'):
1369 1358 raise util.Abort(
1370 1359 _("uncommitted changes in subrepo %s") % s,
1371 1360 hint=_("use --subrepos for recursive commit"))
1372 1361 subs.append(s)
1373 1362 commitsubs.add(s)
1374 1363 else:
1375 1364 bs = wctx.sub(s).basestate()
1376 1365 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1377 1366 if oldstate.get(s, (None, None, None))[1] != bs:
1378 1367 subs.append(s)
1379 1368
1380 1369 # check for removed subrepos
1381 1370 for p in wctx.parents():
1382 1371 r = [s for s in p.substate if s not in newstate]
1383 1372 subs += [s for s in r if match(s)]
1384 1373 if subs:
1385 1374 if (not match('.hgsub') and
1386 1375 '.hgsub' in (wctx.modified() + wctx.added())):
1387 1376 raise util.Abort(
1388 1377 _("can't commit subrepos without .hgsub"))
1389 1378 changes[0].insert(0, '.hgsubstate')
1390 1379
1391 1380 elif '.hgsub' in changes[2]:
1392 1381 # clean up .hgsubstate when .hgsub is removed
1393 1382 if ('.hgsubstate' in wctx and
1394 1383 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1395 1384 changes[2].insert(0, '.hgsubstate')
1396 1385
1397 1386 # make sure all explicit patterns are matched
1398 1387 if not force and match.files():
1399 1388 matched = set(changes[0] + changes[1] + changes[2])
1400 1389
1401 1390 for f in match.files():
1402 1391 f = self.dirstate.normalize(f)
1403 1392 if f == '.' or f in matched or f in wctx.substate:
1404 1393 continue
1405 1394 if f in changes[3]: # missing
1406 1395 fail(f, _('file not found!'))
1407 1396 if f in vdirs: # visited directory
1408 1397 d = f + '/'
1409 1398 for mf in matched:
1410 1399 if mf.startswith(d):
1411 1400 break
1412 1401 else:
1413 1402 fail(f, _("no match under directory!"))
1414 1403 elif f not in self.dirstate:
1415 1404 fail(f, _("file not tracked!"))
1416 1405
1417 1406 if (not force and not extra.get("close") and not merge
1418 1407 and not (changes[0] or changes[1] or changes[2])
1419 1408 and wctx.branch() == wctx.p1().branch()):
1420 1409 return None
1421 1410
1422 1411 if merge and changes[3]:
1423 1412 raise util.Abort(_("cannot commit merge with missing files"))
1424 1413
1425 1414 ms = mergemod.mergestate(self)
1426 1415 for f in changes[0]:
1427 1416 if f in ms and ms[f] == 'u':
1428 1417 raise util.Abort(_("unresolved merge conflicts "
1429 1418 "(see hg help resolve)"))
1430 1419
1431 1420 cctx = context.workingctx(self, text, user, date, extra, changes)
1432 1421 if editor:
1433 1422 cctx._text = editor(self, cctx, subs)
1434 1423 edited = (text != cctx._text)
1435 1424
1436 1425 # commit subs and write new state
1437 1426 if subs:
1438 1427 for s in sorted(commitsubs):
1439 1428 sub = wctx.sub(s)
1440 1429 self.ui.status(_('committing subrepository %s\n') %
1441 1430 subrepo.subrelpath(sub))
1442 1431 sr = sub.commit(cctx._text, user, date)
1443 1432 newstate[s] = (newstate[s][0], sr)
1444 1433 subrepo.writestate(self, newstate)
1445 1434
1446 1435 # Save commit message in case this transaction gets rolled back
1447 1436 # (e.g. by a pretxncommit hook). Leave the content alone on
1448 1437 # the assumption that the user will use the same editor again.
1449 1438 msgfn = self.savecommitmessage(cctx._text)
1450 1439
1451 1440 p1, p2 = self.dirstate.parents()
1452 1441 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1453 1442 try:
1454 1443 self.hook("precommit", throw=True, parent1=hookp1,
1455 1444 parent2=hookp2)
1456 1445 ret = self.commitctx(cctx, True)
1457 1446 except: # re-raises
1458 1447 if edited:
1459 1448 self.ui.write(
1460 1449 _('note: commit message saved in %s\n') % msgfn)
1461 1450 raise
1462 1451
1463 1452 # update bookmarks, dirstate and mergestate
1464 1453 bookmarks.update(self, [p1, p2], ret)
1465 1454 for f in changes[0] + changes[1]:
1466 1455 self.dirstate.normal(f)
1467 1456 for f in changes[2]:
1468 1457 self.dirstate.drop(f)
1469 1458 self.dirstate.setparents(ret)
1470 1459 ms.reset()
1471 1460 finally:
1472 1461 wlock.release()
1473 1462
1474 1463 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1475 1464 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1476 1465 self._afterlock(commithook)
1477 1466 return ret
1478 1467
1479 1468 @unfilteredmethod
1480 1469 def commitctx(self, ctx, error=False):
1481 1470 """Add a new revision to current repository.
1482 1471 Revision information is passed via the context argument.
1483 1472 """
1484 1473
1485 1474 tr = lock = None
1486 1475 removed = list(ctx.removed())
1487 1476 p1, p2 = ctx.p1(), ctx.p2()
1488 1477 user = ctx.user()
1489 1478
1490 1479 lock = self.lock()
1491 1480 try:
1492 1481 tr = self.transaction("commit")
1493 1482 trp = weakref.proxy(tr)
1494 1483
1495 1484 if ctx.files():
1496 1485 m1 = p1.manifest().copy()
1497 1486 m2 = p2.manifest()
1498 1487
1499 1488 # check in files
1500 1489 new = {}
1501 1490 changed = []
1502 1491 linkrev = len(self)
1503 1492 for f in sorted(ctx.modified() + ctx.added()):
1504 1493 self.ui.note(f + "\n")
1505 1494 try:
1506 1495 fctx = ctx[f]
1507 1496 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1508 1497 changed)
1509 1498 m1.set(f, fctx.flags())
1510 1499 except OSError, inst:
1511 1500 self.ui.warn(_("trouble committing %s!\n") % f)
1512 1501 raise
1513 1502 except IOError, inst:
1514 1503 errcode = getattr(inst, 'errno', errno.ENOENT)
1515 1504 if error or errcode and errcode != errno.ENOENT:
1516 1505 self.ui.warn(_("trouble committing %s!\n") % f)
1517 1506 raise
1518 1507 else:
1519 1508 removed.append(f)
1520 1509
1521 1510 # update manifest
1522 1511 m1.update(new)
1523 1512 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1524 1513 drop = [f for f in removed if f in m1]
1525 1514 for f in drop:
1526 1515 del m1[f]
1527 1516 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1528 1517 p2.manifestnode(), (new, drop))
1529 1518 files = changed + removed
1530 1519 else:
1531 1520 mn = p1.manifestnode()
1532 1521 files = []
1533 1522
1534 1523 # update changelog
1535 1524 self.changelog.delayupdate()
1536 1525 n = self.changelog.add(mn, files, ctx.description(),
1537 1526 trp, p1.node(), p2.node(),
1538 1527 user, ctx.date(), ctx.extra().copy())
1539 1528 p = lambda: self.changelog.writepending() and self.root or ""
1540 1529 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1541 1530 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1542 1531 parent2=xp2, pending=p)
1543 1532 self.changelog.finalize(trp)
1544 1533 # set the new commit in its proper phase
1545 1534 targetphase = phases.newcommitphase(self.ui)
1546 1535 if targetphase:
1547 1536 # retracting the boundary does not alter parent changesets.
1548 1537 # if a parent has a higher phase, the resulting phase will
1549 1538 # be compliant anyway
1550 1539 #
1551 1540 # if minimal phase was 0 we don't need to retract anything
1552 1541 phases.retractboundary(self, targetphase, [n])
1553 1542 tr.close()
1554 1543 self.updatebranchcache()
1555 1544 return n
1556 1545 finally:
1557 1546 if tr:
1558 1547 tr.release()
1559 1548 lock.release()
1560 1549
1561 1550 @unfilteredmethod
1562 1551 def destroyed(self, newheadnodes=None):
1563 1552 '''Inform the repository that nodes have been destroyed.
1564 1553 Intended for use by strip and rollback, so there's a common
1565 1554 place for anything that has to be done after destroying history.
1566 1555
1567 1556 If you know the branch heads cache was up to date before nodes were removed
1568 1557 and you also know the set of candidate new heads that may have resulted
1569 1558 from the destruction, you can set newheadnodes. This will enable the
1570 1559 code to update the branchheads cache, rather than having future code
1571 1560 decide it's invalid and regenerate it from scratch.
1572 1561 '''
1573 1562 # If we have info (newheadnodes) on how to update the branch cache, do
1574 1563 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1575 1564 # will be caught the next time it is read.
1576 1565 if newheadnodes:
1577 1566 tiprev = len(self) - 1
1578 1567 ctxgen = (self[node] for node in newheadnodes
1579 1568 if self.changelog.hasnode(node))
1580 1569 self._updatebranchcache(self._branchcache, ctxgen)
1581 self._writebranchcache(self._branchcache, self.changelog.tip(),
1582 tiprev)
1570 branchmap.write(self, self._branchcache, self.changelog.tip(),
1571 tiprev)
1583 1572
1584 1573 # Ensure the persistent tag cache is updated. Doing it now
1585 1574 # means that the tag cache only has to worry about destroyed
1586 1575 # heads immediately after a strip/rollback. That in turn
1587 1576 # guarantees that "cachetip == currenttip" (comparing both rev
1588 1577 # and node) always means no nodes have been added or destroyed.
1589 1578
1590 1579 # XXX this is suboptimal when qrefresh'ing: we strip the current
1591 1580 # head, refresh the tag cache, then immediately add a new head.
1592 1581 # But I think doing it this way is necessary for the "instant
1593 1582 # tag cache retrieval" case to work.
1594 1583 self.invalidatecaches()
1595 1584
1596 1585 # Discard all cache entries to force reloading everything.
1597 1586 self._filecache.clear()
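For context (illustrative, not part of this changeset): a destructive operation such as strip can pass the surviving candidate heads so that the branch cache is refreshed in place and persisted through the new branchmap.write path, rather than invalidated wholesale. A minimal sketch, assuming an existing repo object and a list of surviving head nodes named survivors:

# 'repo' and 'survivors' are assumed to exist; both names are illustrative.
repo.destroyed(newheadnodes=survivors)   # update and write the branch cache

# Calling without the hint is also valid; the stale cache is then rebuilt
# lazily the next time it is read.
repo.destroyed()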
1598 1587
1599 1588 def walk(self, match, node=None):
1600 1589 '''
1601 1590 walk recursively through the directory tree or a given
1602 1591 changeset, finding all files matched by the match
1603 1592 function
1604 1593 '''
1605 1594 return self[node].walk(match)
1606 1595
1607 1596 def status(self, node1='.', node2=None, match=None,
1608 1597 ignored=False, clean=False, unknown=False,
1609 1598 listsubrepos=False):
1610 1599 """return status of files between two nodes or node and working
1611 1600 directory.
1612 1601
1613 1602 If node1 is None, use the first dirstate parent instead.
1614 1603 If node2 is None, compare node1 with working directory.
1615 1604 """
1616 1605
1617 1606 def mfmatches(ctx):
1618 1607 mf = ctx.manifest().copy()
1619 1608 if match.always():
1620 1609 return mf
1621 1610 for fn in mf.keys():
1622 1611 if not match(fn):
1623 1612 del mf[fn]
1624 1613 return mf
1625 1614
1626 1615 if isinstance(node1, context.changectx):
1627 1616 ctx1 = node1
1628 1617 else:
1629 1618 ctx1 = self[node1]
1630 1619 if isinstance(node2, context.changectx):
1631 1620 ctx2 = node2
1632 1621 else:
1633 1622 ctx2 = self[node2]
1634 1623
1635 1624 working = ctx2.rev() is None
1636 1625 parentworking = working and ctx1 == self['.']
1637 1626 match = match or matchmod.always(self.root, self.getcwd())
1638 1627 listignored, listclean, listunknown = ignored, clean, unknown
1639 1628
1640 1629 # load earliest manifest first for caching reasons
1641 1630 if not working and ctx2.rev() < ctx1.rev():
1642 1631 ctx2.manifest()
1643 1632
1644 1633 if not parentworking:
1645 1634 def bad(f, msg):
1646 1635 # 'f' may be a directory pattern from 'match.files()',
1647 1636 # so 'f not in ctx1' is not enough
1648 1637 if f not in ctx1 and f not in ctx1.dirs():
1649 1638 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1650 1639 match.bad = bad
1651 1640
1652 1641 if working: # we need to scan the working dir
1653 1642 subrepos = []
1654 1643 if '.hgsub' in self.dirstate:
1655 1644 subrepos = ctx2.substate.keys()
1656 1645 s = self.dirstate.status(match, subrepos, listignored,
1657 1646 listclean, listunknown)
1658 1647 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1659 1648
1660 1649 # check for any possibly clean files
1661 1650 if parentworking and cmp:
1662 1651 fixup = []
1663 1652 # do a full compare of any files that might have changed
1664 1653 for f in sorted(cmp):
1665 1654 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1666 1655 or ctx1[f].cmp(ctx2[f])):
1667 1656 modified.append(f)
1668 1657 else:
1669 1658 fixup.append(f)
1670 1659
1671 1660 # update dirstate for files that are actually clean
1672 1661 if fixup:
1673 1662 if listclean:
1674 1663 clean += fixup
1675 1664
1676 1665 try:
1677 1666 # updating the dirstate is optional
1678 1667 # so we don't wait on the lock
1679 1668 wlock = self.wlock(False)
1680 1669 try:
1681 1670 for f in fixup:
1682 1671 self.dirstate.normal(f)
1683 1672 finally:
1684 1673 wlock.release()
1685 1674 except error.LockError:
1686 1675 pass
1687 1676
1688 1677 if not parentworking:
1689 1678 mf1 = mfmatches(ctx1)
1690 1679 if working:
1691 1680 # we are comparing working dir against non-parent
1692 1681 # generate a pseudo-manifest for the working dir
1693 1682 mf2 = mfmatches(self['.'])
1694 1683 for f in cmp + modified + added:
1695 1684 mf2[f] = None
1696 1685 mf2.set(f, ctx2.flags(f))
1697 1686 for f in removed:
1698 1687 if f in mf2:
1699 1688 del mf2[f]
1700 1689 else:
1701 1690 # we are comparing two revisions
1702 1691 deleted, unknown, ignored = [], [], []
1703 1692 mf2 = mfmatches(ctx2)
1704 1693
1705 1694 modified, added, clean = [], [], []
1706 1695 withflags = mf1.withflags() | mf2.withflags()
1707 1696 for fn in mf2:
1708 1697 if fn in mf1:
1709 1698 if (fn not in deleted and
1710 1699 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1711 1700 (mf1[fn] != mf2[fn] and
1712 1701 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1713 1702 modified.append(fn)
1714 1703 elif listclean:
1715 1704 clean.append(fn)
1716 1705 del mf1[fn]
1717 1706 elif fn not in deleted:
1718 1707 added.append(fn)
1719 1708 removed = mf1.keys()
1720 1709
1721 1710 if working and modified and not self.dirstate._checklink:
1722 1711 # Symlink placeholders may get non-symlink-like contents
1723 1712 # via user error or dereferencing by NFS or Samba servers,
1724 1713 # so we filter out any placeholders that don't look like a
1725 1714 # symlink
1726 1715 sane = []
1727 1716 for f in modified:
1728 1717 if ctx2.flags(f) == 'l':
1729 1718 d = ctx2[f].data()
1730 1719 if len(d) >= 1024 or '\n' in d or util.binary(d):
1731 1720 self.ui.debug('ignoring suspect symlink placeholder'
1732 1721 ' "%s"\n' % f)
1733 1722 continue
1734 1723 sane.append(f)
1735 1724 modified = sane
1736 1725
1737 1726 r = modified, added, removed, deleted, unknown, ignored, clean
1738 1727
1739 1728 if listsubrepos:
1740 1729 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1741 1730 if working:
1742 1731 rev2 = None
1743 1732 else:
1744 1733 rev2 = ctx2.substate[subpath][1]
1745 1734 try:
1746 1735 submatch = matchmod.narrowmatcher(subpath, match)
1747 1736 s = sub.status(rev2, match=submatch, ignored=listignored,
1748 1737 clean=listclean, unknown=listunknown,
1749 1738 listsubrepos=True)
1750 1739 for rfiles, sfiles in zip(r, s):
1751 1740 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1752 1741 except error.LookupError:
1753 1742 self.ui.status(_("skipping missing subrepository: %s\n")
1754 1743 % subpath)
1755 1744
1756 1745 for l in r:
1757 1746 l.sort()
1758 1747 return r
1759 1748
1760 1749 def heads(self, start=None):
1761 1750 heads = self.changelog.heads(start)
1762 1751 # sort the output in rev descending order
1763 1752 return sorted(heads, key=self.changelog.rev, reverse=True)
1764 1753
1765 1754 def branchheads(self, branch=None, start=None, closed=False):
1766 1755 '''return a (possibly filtered) list of heads for the given branch
1767 1756
1768 1757 Heads are returned in topological order, from newest to oldest.
1769 1758 If branch is None, use the dirstate branch.
1770 1759 If start is not None, return only heads reachable from start.
1771 1760 If closed is True, return heads that are marked as closed as well.
1772 1761 '''
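A brief usage sketch of the interface above (the repo object and branch name are assumptions):

# Newest-first heads of the 'default' branch, ignoring closed heads.
heads = repo.branchheads('default', closed=False)
# Only heads reachable from the working directory parent.
reachable = repo.branchheads('default', start=repo['.'].node())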
1773 1762 if branch is None:
1774 1763 branch = self[None].branch()
1775 1764 branches = self.branchmap()
1776 1765 if branch not in branches:
1777 1766 return []
1778 1767 # the cache returns heads ordered lowest to highest
1779 1768 bheads = list(reversed(branches[branch]))
1780 1769 if start is not None:
1781 1770 # filter out the heads that cannot be reached from startrev
1782 1771 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1783 1772 bheads = [h for h in bheads if h in fbheads]
1784 1773 if not closed:
1785 1774 bheads = [h for h in bheads if not self[h].closesbranch()]
1786 1775 return bheads
1787 1776
1788 1777 def branches(self, nodes):
1789 1778 if not nodes:
1790 1779 nodes = [self.changelog.tip()]
1791 1780 b = []
1792 1781 for n in nodes:
1793 1782 t = n
1794 1783 while True:
1795 1784 p = self.changelog.parents(n)
1796 1785 if p[1] != nullid or p[0] == nullid:
1797 1786 b.append((t, n, p[0], p[1]))
1798 1787 break
1799 1788 n = p[0]
1800 1789 return b
1801 1790
1802 1791 def between(self, pairs):
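# For each (top, bottom) pair, walk first parents from top towards bottom
# and record the nodes at distances 1, 2, 4, 8, ... from top, stopping at
# bottom or the null node; the exponentially spaced samples appear to
# serve the older 'between'-based discovery protocol.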
1803 1792 r = []
1804 1793
1805 1794 for top, bottom in pairs:
1806 1795 n, l, i = top, [], 0
1807 1796 f = 1
1808 1797
1809 1798 while n != bottom and n != nullid:
1810 1799 p = self.changelog.parents(n)[0]
1811 1800 if i == f:
1812 1801 l.append(n)
1813 1802 f = f * 2
1814 1803 n = p
1815 1804 i += 1
1816 1805
1817 1806 r.append(l)
1818 1807
1819 1808 return r
1820 1809
1821 1810 def pull(self, remote, heads=None, force=False):
1822 1811 # don't open a transaction for nothing or you break future useful
1823 1812 # rollback calls
1824 1813 tr = None
1825 1814 trname = 'pull\n' + util.hidepassword(remote.url())
1826 1815 lock = self.lock()
1827 1816 try:
1828 1817 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1829 1818 force=force)
1830 1819 common, fetch, rheads = tmp
1831 1820 if not fetch:
1832 1821 self.ui.status(_("no changes found\n"))
1833 1822 added = []
1834 1823 result = 0
1835 1824 else:
1836 1825 tr = self.transaction(trname)
1837 1826 if heads is None and list(common) == [nullid]:
1838 1827 self.ui.status(_("requesting all changes\n"))
1839 1828 elif heads is None and remote.capable('changegroupsubset'):
1840 1829 # issue1320, avoid a race if remote changed after discovery
1841 1830 heads = rheads
1842 1831
1843 1832 if remote.capable('getbundle'):
1844 1833 cg = remote.getbundle('pull', common=common,
1845 1834 heads=heads or rheads)
1846 1835 elif heads is None:
1847 1836 cg = remote.changegroup(fetch, 'pull')
1848 1837 elif not remote.capable('changegroupsubset'):
1849 1838 raise util.Abort(_("partial pull cannot be done because "
1850 1839 "other repository doesn't support "
1851 1840 "changegroupsubset."))
1852 1841 else:
1853 1842 cg = remote.changegroupsubset(fetch, heads, 'pull')
1854 1843 clstart = len(self.changelog)
1855 1844 result = self.addchangegroup(cg, 'pull', remote.url())
1856 1845 clend = len(self.changelog)
1857 1846 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1858 1847
1859 1848 # compute target subset
1860 1849 if heads is None:
1861 1850 # We pulled everything possible
1862 1851 # sync on everything common
1863 1852 subset = common + added
1864 1853 else:
1865 1854 # We pulled a specific subset
1866 1855 # sync on this subset
1867 1856 subset = heads
1868 1857
1869 1858 # Get phases data from the remote
1870 1859 remotephases = remote.listkeys('phases')
1871 1860 publishing = bool(remotephases.get('publishing', False))
1872 1861 if remotephases and not publishing:
1873 1862 # remote is new and non-publishing
1874 1863 pheads, _dr = phases.analyzeremotephases(self, subset,
1875 1864 remotephases)
1876 1865 phases.advanceboundary(self, phases.public, pheads)
1877 1866 phases.advanceboundary(self, phases.draft, subset)
1878 1867 else:
1879 1868 # Remote is old or publishing; all common changesets
1880 1869 # should be seen as public
1881 1870 phases.advanceboundary(self, phases.public, subset)
1882 1871
1883 1872 if obsolete._enabled:
1884 1873 self.ui.debug('fetching remote obsolete markers\n')
1885 1874 remoteobs = remote.listkeys('obsolete')
1886 1875 if 'dump0' in remoteobs:
1887 1876 if tr is None:
1888 1877 tr = self.transaction(trname)
1889 1878 for key in sorted(remoteobs, reverse=True):
1890 1879 if key.startswith('dump'):
1891 1880 data = base85.b85decode(remoteobs[key])
1892 1881 self.obsstore.mergemarkers(tr, data)
1893 1882 self.invalidatevolatilesets()
1894 1883 if tr is not None:
1895 1884 tr.close()
1896 1885 finally:
1897 1886 if tr is not None:
1898 1887 tr.release()
1899 1888 lock.release()
1900 1889
1901 1890 return result
1902 1891
1903 1892 def checkpush(self, force, revs):
1904 1893 """Extensions can override this function if additional checks have
1905 1894 to be performed before pushing, or call it if they override push
1906 1895 command.
1907 1896 """
1908 1897 pass
1909 1898
1910 1899 def push(self, remote, force=False, revs=None, newbranch=False):
1911 1900 '''Push outgoing changesets (limited by revs) from the current
1912 1901 repository to remote. Return an integer:
1913 1902 - None means nothing to push
1914 1903 - 0 means HTTP error
1915 1904 - 1 means we pushed and remote head count is unchanged *or*
1916 1905 we have outgoing changesets but refused to push
1917 1906 - other values as described by addchangegroup()
1918 1907 '''
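A hedged sketch of how a caller might interpret the return value described above (repo and remote are assumed to exist; not part of this change):

ret = repo.push(remote, force=False, revs=None, newbranch=False)
if ret is None:
    repo.ui.status('nothing to push\n')
elif ret == 0:
    repo.ui.warn('push failed (HTTP error)\n')
else:
    # 1 means the remote head count is unchanged (or the push was refused);
    # other values follow the addchangegroup() convention further below.
    repo.ui.status('pushed (result %d)\n' % ret)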
1919 1908 # there are two ways to push to remote repo:
1920 1909 #
1921 1910 # addchangegroup assumes local user can lock remote
1922 1911 # repo (local filesystem, old ssh servers).
1923 1912 #
1924 1913 # unbundle assumes local user cannot lock remote repo (new ssh
1925 1914 # servers, http servers).
1926 1915
1927 1916 if not remote.canpush():
1928 1917 raise util.Abort(_("destination does not support push"))
1929 1918 unfi = self.unfiltered()
1930 1919 # get local lock as we might write phase data
1931 1920 locallock = self.lock()
1932 1921 try:
1933 1922 self.checkpush(force, revs)
1934 1923 lock = None
1935 1924 unbundle = remote.capable('unbundle')
1936 1925 if not unbundle:
1937 1926 lock = remote.lock()
1938 1927 try:
1939 1928 # discovery
1940 1929 fci = discovery.findcommonincoming
1941 1930 commoninc = fci(unfi, remote, force=force)
1942 1931 common, inc, remoteheads = commoninc
1943 1932 fco = discovery.findcommonoutgoing
1944 1933 outgoing = fco(unfi, remote, onlyheads=revs,
1945 1934 commoninc=commoninc, force=force)
1946 1935
1947 1936
1948 1937 if not outgoing.missing:
1949 1938 # nothing to push
1950 1939 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1951 1940 ret = None
1952 1941 else:
1953 1942 # something to push
1954 1943 if not force:
1955 1944 # if self.obsstore is empty --> no obsolete markers,
1956 1945 # so we can skip the iteration below
1957 1946 if unfi.obsstore:
1958 1947 # these messages are defined here to stay within the 80-char limit
1959 1948 mso = _("push includes obsolete changeset: %s!")
1960 1949 msu = _("push includes unstable changeset: %s!")
1961 1950 msb = _("push includes bumped changeset: %s!")
1962 1951 msd = _("push includes divergent changeset: %s!")
1963 1952 # If we are about to push and there is at least one
1964 1953 # obsolete or unstable changeset in missing, at
1965 1954 # least one of the missing heads will be obsolete or
1966 1955 # unstable. So checking heads only is ok
1967 1956 for node in outgoing.missingheads:
1968 1957 ctx = unfi[node]
1969 1958 if ctx.obsolete():
1970 1959 raise util.Abort(mso % ctx)
1971 1960 elif ctx.unstable():
1972 1961 raise util.Abort(msu % ctx)
1973 1962 elif ctx.bumped():
1974 1963 raise util.Abort(msb % ctx)
1975 1964 elif ctx.divergent():
1976 1965 raise util.Abort(msd % ctx)
1977 1966 discovery.checkheads(unfi, remote, outgoing,
1978 1967 remoteheads, newbranch,
1979 1968 bool(inc))
1980 1969
1981 1970 # create a changegroup from local
1982 1971 if revs is None and not outgoing.excluded:
1983 1972 # push everything,
1984 1973 # use the fast path, no race possible on push
1985 1974 cg = self._changegroup(outgoing.missing, 'push')
1986 1975 else:
1987 1976 cg = self.getlocalbundle('push', outgoing)
1988 1977
1989 1978 # apply changegroup to remote
1990 1979 if unbundle:
1991 1980 # local repo finds heads on server, finds out what
1992 1981 # revs it must push. once revs transferred, if server
1993 1982 # finds it has different heads (someone else won
1994 1983 # commit/push race), server aborts.
1995 1984 if force:
1996 1985 remoteheads = ['force']
1997 1986 # ssh: return remote's addchangegroup()
1998 1987 # http: return remote's addchangegroup() or 0 for error
1999 1988 ret = remote.unbundle(cg, remoteheads, 'push')
2000 1989 else:
2001 1990 # we return an integer indicating remote head count
2002 1991 # change
2003 1992 ret = remote.addchangegroup(cg, 'push', self.url())
2004 1993
2005 1994 if ret:
2006 1995 # push succeeded, synchronize the target of the push
2007 1996 cheads = outgoing.missingheads
2008 1997 elif revs is None:
2009 1998 # The push of everything failed. Synchronize all common
2010 1999 cheads = outgoing.commonheads
2011 2000 else:
2012 2001 # I want cheads = heads(::missingheads and ::commonheads)
2013 2002 # (missingheads is revs with secret changeset filtered out)
2014 2003 #
2015 2004 # This can be expressed as:
2016 2005 # cheads = ( (missingheads and ::commonheads)
2017 2006 # + (commonheads and ::missingheads))"
2018 2007 # )
2019 2008 #
2020 2009 # while trying to push we already computed the following:
2021 2010 # common = (::commonheads)
2022 2011 # missing = ((commonheads::missingheads) - commonheads)
2023 2012 #
2024 2013 # We can pick:
2025 2014 # * missingheads part of common (::commonheads)
2026 2015 common = set(outgoing.common)
2027 2016 cheads = [node for node in revs if node in common]
2028 2017 # and
2029 2018 # * commonheads parents on missing
2030 2019 revset = unfi.set('%ln and parents(roots(%ln))',
2031 2020 outgoing.commonheads,
2032 2021 outgoing.missing)
2033 2022 cheads.extend(c.node() for c in revset)
2034 2023 # even when we don't push, exchanging phase data is useful
2035 2024 remotephases = remote.listkeys('phases')
2036 2025 if not remotephases: # old server or public only repo
2037 2026 phases.advanceboundary(self, phases.public, cheads)
2038 2027 # don't push any phase data as there is nothing to push
2039 2028 else:
2040 2029 ana = phases.analyzeremotephases(self, cheads, remotephases)
2041 2030 pheads, droots = ana
2042 2031 ### Apply remote phase on local
2043 2032 if remotephases.get('publishing', False):
2044 2033 phases.advanceboundary(self, phases.public, cheads)
2045 2034 else: # publish = False
2046 2035 phases.advanceboundary(self, phases.public, pheads)
2047 2036 phases.advanceboundary(self, phases.draft, cheads)
2048 2037 ### Apply local phase on remote
2049 2038
2050 2039 # Get the list of all revs that are draft on the remote but public here.
2051 2040 # XXX Beware that the revset breaks if droots are not strictly
2052 2041 # XXX roots; we may want to ensure they are, but it is costly
2053 2042 outdated = unfi.set('heads((%ln::%ln) and public())',
2054 2043 droots, cheads)
2055 2044 for newremotehead in outdated:
2056 2045 r = remote.pushkey('phases',
2057 2046 newremotehead.hex(),
2058 2047 str(phases.draft),
2059 2048 str(phases.public))
2060 2049 if not r:
2061 2050 self.ui.warn(_('updating %s to public failed!\n')
2062 2051 % newremotehead)
2063 2052 self.ui.debug('try to push obsolete markers to remote\n')
2064 2053 if (obsolete._enabled and self.obsstore and
2065 2054 'obsolete' in remote.listkeys('namespaces')):
2066 2055 rslts = []
2067 2056 remotedata = self.listkeys('obsolete')
2068 2057 for key in sorted(remotedata, reverse=True):
2069 2058 # reverse sort to ensure we end with dump0
2070 2059 data = remotedata[key]
2071 2060 rslts.append(remote.pushkey('obsolete', key, '', data))
2072 2061 if [r for r in rslts if not r]:
2073 2062 msg = _('failed to push some obsolete markers!\n')
2074 2063 self.ui.warn(msg)
2075 2064 finally:
2076 2065 if lock is not None:
2077 2066 lock.release()
2078 2067 finally:
2079 2068 locallock.release()
2080 2069
2081 2070 self.ui.debug("checking for updated bookmarks\n")
2082 2071 rb = remote.listkeys('bookmarks')
2083 2072 for k in rb.keys():
2084 2073 if k in unfi._bookmarks:
2085 2074 nr, nl = rb[k], hex(self._bookmarks[k])
2086 2075 if nr in unfi:
2087 2076 cr = unfi[nr]
2088 2077 cl = unfi[nl]
2089 2078 if bookmarks.validdest(unfi, cr, cl):
2090 2079 r = remote.pushkey('bookmarks', k, nr, nl)
2091 2080 if r:
2092 2081 self.ui.status(_("updating bookmark %s\n") % k)
2093 2082 else:
2094 2083 self.ui.warn(_('updating bookmark %s'
2095 2084 ' failed!\n') % k)
2096 2085
2097 2086 return ret
2098 2087
2099 2088 def changegroupinfo(self, nodes, source):
2100 2089 if self.ui.verbose or source == 'bundle':
2101 2090 self.ui.status(_("%d changesets found\n") % len(nodes))
2102 2091 if self.ui.debugflag:
2103 2092 self.ui.debug("list of changesets:\n")
2104 2093 for node in nodes:
2105 2094 self.ui.debug("%s\n" % hex(node))
2106 2095
2107 2096 def changegroupsubset(self, bases, heads, source):
2108 2097 """Compute a changegroup consisting of all the nodes that are
2109 2098 descendants of any of the bases and ancestors of any of the heads.
2110 2099 Return a chunkbuffer object whose read() method will return
2111 2100 successive changegroup chunks.
2112 2101
2113 2102 It is fairly complex as determining which filenodes and which
2114 2103 manifest nodes need to be included for the changeset to be complete
2115 2104 is non-trivial.
2116 2105
2117 2106 Another wrinkle is doing the reverse, figuring out which changeset in
2118 2107 the changegroup a particular filenode or manifestnode belongs to.
2119 2108 """
2120 2109 cl = self.changelog
2121 2110 if not bases:
2122 2111 bases = [nullid]
2123 2112 csets, bases, heads = cl.nodesbetween(bases, heads)
2124 2113 # We assume that all ancestors of bases are known
2125 2114 common = cl.ancestors([cl.rev(n) for n in bases])
2126 2115 return self._changegroupsubset(common, csets, heads, source)
2127 2116
2128 2117 def getlocalbundle(self, source, outgoing):
2129 2118 """Like getbundle, but taking a discovery.outgoing as an argument.
2130 2119
2131 2120 This is only implemented for local repos and reuses potentially
2132 2121 precomputed sets in outgoing."""
2133 2122 if not outgoing.missing:
2134 2123 return None
2135 2124 return self._changegroupsubset(outgoing.common,
2136 2125 outgoing.missing,
2137 2126 outgoing.missingheads,
2138 2127 source)
2139 2128
2140 2129 def getbundle(self, source, heads=None, common=None):
2141 2130 """Like changegroupsubset, but returns the set difference between the
2142 2131 ancestors of heads and the ancestors common.
2143 2132
2144 2133 If heads is None, use the local heads. If common is None, use [nullid].
2145 2134
2146 2135 The nodes in common might not all be known locally due to the way the
2147 2136 current discovery protocol works.
2148 2137 """
2149 2138 cl = self.changelog
2150 2139 if common:
2151 2140 hasnode = cl.hasnode
2152 2141 common = [n for n in common if hasnode(n)]
2153 2142 else:
2154 2143 common = [nullid]
2155 2144 if not heads:
2156 2145 heads = cl.heads()
2157 2146 return self.getlocalbundle(source,
2158 2147 discovery.outgoing(cl, common, heads))
2159 2148
2160 2149 @unfilteredmethod
2161 2150 def _changegroupsubset(self, commonrevs, csets, heads, source):
2162 2151
2163 2152 cl = self.changelog
2164 2153 mf = self.manifest
2165 2154 mfs = {} # needed manifests
2166 2155 fnodes = {} # needed file nodes
2167 2156 changedfiles = set()
2168 2157 fstate = ['', {}]
2169 2158 count = [0, 0]
2170 2159
2171 2160 # can we go through the fast path?
2172 2161 heads.sort()
2173 2162 if heads == sorted(self.heads()):
2174 2163 return self._changegroup(csets, source)
2175 2164
2176 2165 # slow path
2177 2166 self.hook('preoutgoing', throw=True, source=source)
2178 2167 self.changegroupinfo(csets, source)
2179 2168
2180 2169 # filter any nodes that claim to be part of the known set
2181 2170 def prune(revlog, missing):
2182 2171 rr, rl = revlog.rev, revlog.linkrev
2183 2172 return [n for n in missing
2184 2173 if rl(rr(n)) not in commonrevs]
2185 2174
2186 2175 progress = self.ui.progress
2187 2176 _bundling = _('bundling')
2188 2177 _changesets = _('changesets')
2189 2178 _manifests = _('manifests')
2190 2179 _files = _('files')
2191 2180
2192 2181 def lookup(revlog, x):
2193 2182 if revlog == cl:
2194 2183 c = cl.read(x)
2195 2184 changedfiles.update(c[3])
2196 2185 mfs.setdefault(c[0], x)
2197 2186 count[0] += 1
2198 2187 progress(_bundling, count[0],
2199 2188 unit=_changesets, total=count[1])
2200 2189 return x
2201 2190 elif revlog == mf:
2202 2191 clnode = mfs[x]
2203 2192 mdata = mf.readfast(x)
2204 2193 for f, n in mdata.iteritems():
2205 2194 if f in changedfiles:
2206 2195 fnodes[f].setdefault(n, clnode)
2207 2196 count[0] += 1
2208 2197 progress(_bundling, count[0],
2209 2198 unit=_manifests, total=count[1])
2210 2199 return clnode
2211 2200 else:
2212 2201 progress(_bundling, count[0], item=fstate[0],
2213 2202 unit=_files, total=count[1])
2214 2203 return fstate[1][x]
2215 2204
2216 2205 bundler = changegroup.bundle10(lookup)
2217 2206 reorder = self.ui.config('bundle', 'reorder', 'auto')
2218 2207 if reorder == 'auto':
2219 2208 reorder = None
2220 2209 else:
2221 2210 reorder = util.parsebool(reorder)
2222 2211
2223 2212 def gengroup():
2224 2213 # Create a changenode group generator that will call our functions
2225 2214 # back to lookup the owning changenode and collect information.
2226 2215 count[:] = [0, len(csets)]
2227 2216 for chunk in cl.group(csets, bundler, reorder=reorder):
2228 2217 yield chunk
2229 2218 progress(_bundling, None)
2230 2219
2231 2220 # Create a generator for the manifestnodes that calls our lookup
2232 2221 # and data collection functions back.
2233 2222 for f in changedfiles:
2234 2223 fnodes[f] = {}
2235 2224 count[:] = [0, len(mfs)]
2236 2225 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2237 2226 yield chunk
2238 2227 progress(_bundling, None)
2239 2228
2240 2229 mfs.clear()
2241 2230
2242 2231 # Go through all our files in order sorted by name.
2243 2232 count[:] = [0, len(changedfiles)]
2244 2233 for fname in sorted(changedfiles):
2245 2234 filerevlog = self.file(fname)
2246 2235 if not len(filerevlog):
2247 2236 raise util.Abort(_("empty or missing revlog for %s")
2248 2237 % fname)
2249 2238 fstate[0] = fname
2250 2239 fstate[1] = fnodes.pop(fname, {})
2251 2240
2252 2241 nodelist = prune(filerevlog, fstate[1])
2253 2242 if nodelist:
2254 2243 count[0] += 1
2255 2244 yield bundler.fileheader(fname)
2256 2245 for chunk in filerevlog.group(nodelist, bundler, reorder):
2257 2246 yield chunk
2258 2247
2259 2248 # Signal that no more groups are left.
2260 2249 yield bundler.close()
2261 2250 progress(_bundling, None)
2262 2251
2263 2252 if csets:
2264 2253 self.hook('outgoing', node=hex(csets[0]), source=source)
2265 2254
2266 2255 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2267 2256
2268 2257 def changegroup(self, basenodes, source):
2269 2258 # to avoid a race we use changegroupsubset() (issue1320)
2270 2259 return self.changegroupsubset(basenodes, self.heads(), source)
2271 2260
2272 2261 @unfilteredmethod
2273 2262 def _changegroup(self, nodes, source):
2274 2263 """Compute the changegroup of all nodes that we have that a recipient
2275 2264 doesn't. Return a chunkbuffer object whose read() method will return
2276 2265 successive changegroup chunks.
2277 2266
2278 2267 This is much easier than the previous function as we can assume that
2279 2268 the recipient has any changenode we aren't sending them.
2280 2269
2281 2270 nodes is the set of nodes to send"""
2282 2271
2283 2272 cl = self.changelog
2284 2273 mf = self.manifest
2285 2274 mfs = {}
2286 2275 changedfiles = set()
2287 2276 fstate = ['']
2288 2277 count = [0, 0]
2289 2278
2290 2279 self.hook('preoutgoing', throw=True, source=source)
2291 2280 self.changegroupinfo(nodes, source)
2292 2281
2293 2282 revset = set([cl.rev(n) for n in nodes])
2294 2283
2295 2284 def gennodelst(log):
2296 2285 ln, llr = log.node, log.linkrev
2297 2286 return [ln(r) for r in log if llr(r) in revset]
2298 2287
2299 2288 progress = self.ui.progress
2300 2289 _bundling = _('bundling')
2301 2290 _changesets = _('changesets')
2302 2291 _manifests = _('manifests')
2303 2292 _files = _('files')
2304 2293
2305 2294 def lookup(revlog, x):
2306 2295 if revlog == cl:
2307 2296 c = cl.read(x)
2308 2297 changedfiles.update(c[3])
2309 2298 mfs.setdefault(c[0], x)
2310 2299 count[0] += 1
2311 2300 progress(_bundling, count[0],
2312 2301 unit=_changesets, total=count[1])
2313 2302 return x
2314 2303 elif revlog == mf:
2315 2304 count[0] += 1
2316 2305 progress(_bundling, count[0],
2317 2306 unit=_manifests, total=count[1])
2318 2307 return cl.node(revlog.linkrev(revlog.rev(x)))
2319 2308 else:
2320 2309 progress(_bundling, count[0], item=fstate[0],
2321 2310 total=count[1], unit=_files)
2322 2311 return cl.node(revlog.linkrev(revlog.rev(x)))
2323 2312
2324 2313 bundler = changegroup.bundle10(lookup)
2325 2314 reorder = self.ui.config('bundle', 'reorder', 'auto')
2326 2315 if reorder == 'auto':
2327 2316 reorder = None
2328 2317 else:
2329 2318 reorder = util.parsebool(reorder)
2330 2319
2331 2320 def gengroup():
2332 2321 '''yield a sequence of changegroup chunks (strings)'''
2333 2322 # construct a list of all changed files
2334 2323
2335 2324 count[:] = [0, len(nodes)]
2336 2325 for chunk in cl.group(nodes, bundler, reorder=reorder):
2337 2326 yield chunk
2338 2327 progress(_bundling, None)
2339 2328
2340 2329 count[:] = [0, len(mfs)]
2341 2330 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2342 2331 yield chunk
2343 2332 progress(_bundling, None)
2344 2333
2345 2334 count[:] = [0, len(changedfiles)]
2346 2335 for fname in sorted(changedfiles):
2347 2336 filerevlog = self.file(fname)
2348 2337 if not len(filerevlog):
2349 2338 raise util.Abort(_("empty or missing revlog for %s")
2350 2339 % fname)
2351 2340 fstate[0] = fname
2352 2341 nodelist = gennodelst(filerevlog)
2353 2342 if nodelist:
2354 2343 count[0] += 1
2355 2344 yield bundler.fileheader(fname)
2356 2345 for chunk in filerevlog.group(nodelist, bundler, reorder):
2357 2346 yield chunk
2358 2347 yield bundler.close()
2359 2348 progress(_bundling, None)
2360 2349
2361 2350 if nodes:
2362 2351 self.hook('outgoing', node=hex(nodes[0]), source=source)
2363 2352
2364 2353 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2365 2354
2366 2355 @unfilteredmethod
2367 2356 def addchangegroup(self, source, srctype, url, emptyok=False):
2368 2357 """Add the changegroup returned by source.read() to this repo.
2369 2358 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2370 2359 the URL of the repo where this changegroup is coming from.
2371 2360
2372 2361 Return an integer summarizing the change to this repo:
2373 2362 - nothing changed or no source: 0
2374 2363 - more heads than before: 1+added heads (2..n)
2375 2364 - fewer heads than before: -1-removed heads (-2..-n)
2376 2365 - number of heads stays the same: 1
2377 2366 """
2378 2367 def csmap(x):
2379 2368 self.ui.debug("add changeset %s\n" % short(x))
2380 2369 return len(cl)
2381 2370
2382 2371 def revmap(x):
2383 2372 return cl.rev(x)
2384 2373
2385 2374 if not source:
2386 2375 return 0
2387 2376
2388 2377 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2389 2378
2390 2379 changesets = files = revisions = 0
2391 2380 efiles = set()
2392 2381
2393 2382 # write changelog data to temp files so concurrent readers will not see
2394 2383 # inconsistent view
2395 2384 cl = self.changelog
2396 2385 cl.delayupdate()
2397 2386 oldheads = cl.heads()
2398 2387
2399 2388 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2400 2389 try:
2401 2390 trp = weakref.proxy(tr)
2402 2391 # pull off the changeset group
2403 2392 self.ui.status(_("adding changesets\n"))
2404 2393 clstart = len(cl)
2405 2394 class prog(object):
2406 2395 step = _('changesets')
2407 2396 count = 1
2408 2397 ui = self.ui
2409 2398 total = None
2410 2399 def __call__(self):
2411 2400 self.ui.progress(self.step, self.count, unit=_('chunks'),
2412 2401 total=self.total)
2413 2402 self.count += 1
2414 2403 pr = prog()
2415 2404 source.callback = pr
2416 2405
2417 2406 source.changelogheader()
2418 2407 srccontent = cl.addgroup(source, csmap, trp)
2419 2408 if not (srccontent or emptyok):
2420 2409 raise util.Abort(_("received changelog group is empty"))
2421 2410 clend = len(cl)
2422 2411 changesets = clend - clstart
2423 2412 for c in xrange(clstart, clend):
2424 2413 efiles.update(self[c].files())
2425 2414 efiles = len(efiles)
2426 2415 self.ui.progress(_('changesets'), None)
2427 2416
2428 2417 # pull off the manifest group
2429 2418 self.ui.status(_("adding manifests\n"))
2430 2419 pr.step = _('manifests')
2431 2420 pr.count = 1
2432 2421 pr.total = changesets # manifests <= changesets
2433 2422 # no need to check for empty manifest group here:
2434 2423 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2435 2424 # no new manifest will be created and the manifest group will
2436 2425 # be empty during the pull
2437 2426 source.manifestheader()
2438 2427 self.manifest.addgroup(source, revmap, trp)
2439 2428 self.ui.progress(_('manifests'), None)
2440 2429
2441 2430 needfiles = {}
2442 2431 if self.ui.configbool('server', 'validate', default=False):
2443 2432 # validate incoming csets have their manifests
2444 2433 for cset in xrange(clstart, clend):
2445 2434 mfest = self.changelog.read(self.changelog.node(cset))[0]
2446 2435 mfest = self.manifest.readdelta(mfest)
2447 2436 # store file nodes we must see
2448 2437 for f, n in mfest.iteritems():
2449 2438 needfiles.setdefault(f, set()).add(n)
2450 2439
2451 2440 # process the files
2452 2441 self.ui.status(_("adding file changes\n"))
2453 2442 pr.step = _('files')
2454 2443 pr.count = 1
2455 2444 pr.total = efiles
2456 2445 source.callback = None
2457 2446
2458 2447 while True:
2459 2448 chunkdata = source.filelogheader()
2460 2449 if not chunkdata:
2461 2450 break
2462 2451 f = chunkdata["filename"]
2463 2452 self.ui.debug("adding %s revisions\n" % f)
2464 2453 pr()
2465 2454 fl = self.file(f)
2466 2455 o = len(fl)
2467 2456 if not fl.addgroup(source, revmap, trp):
2468 2457 raise util.Abort(_("received file revlog group is empty"))
2469 2458 revisions += len(fl) - o
2470 2459 files += 1
2471 2460 if f in needfiles:
2472 2461 needs = needfiles[f]
2473 2462 for new in xrange(o, len(fl)):
2474 2463 n = fl.node(new)
2475 2464 if n in needs:
2476 2465 needs.remove(n)
2477 2466 if not needs:
2478 2467 del needfiles[f]
2479 2468 self.ui.progress(_('files'), None)
2480 2469
2481 2470 for f, needs in needfiles.iteritems():
2482 2471 fl = self.file(f)
2483 2472 for n in needs:
2484 2473 try:
2485 2474 fl.rev(n)
2486 2475 except error.LookupError:
2487 2476 raise util.Abort(
2488 2477 _('missing file data for %s:%s - run hg verify') %
2489 2478 (f, hex(n)))
2490 2479
2491 2480 dh = 0
2492 2481 if oldheads:
2493 2482 heads = cl.heads()
2494 2483 dh = len(heads) - len(oldheads)
2495 2484 for h in heads:
2496 2485 if h not in oldheads and self[h].closesbranch():
2497 2486 dh -= 1
2498 2487 htext = ""
2499 2488 if dh:
2500 2489 htext = _(" (%+d heads)") % dh
2501 2490
2502 2491 self.ui.status(_("added %d changesets"
2503 2492 " with %d changes to %d files%s\n")
2504 2493 % (changesets, revisions, files, htext))
2505 2494 self.invalidatevolatilesets()
2506 2495
2507 2496 if changesets > 0:
2508 2497 p = lambda: cl.writepending() and self.root or ""
2509 2498 self.hook('pretxnchangegroup', throw=True,
2510 2499 node=hex(cl.node(clstart)), source=srctype,
2511 2500 url=url, pending=p)
2512 2501
2513 2502 added = [cl.node(r) for r in xrange(clstart, clend)]
2514 2503 publishing = self.ui.configbool('phases', 'publish', True)
2515 2504 if srctype == 'push':
2516 2505 # Old servers can not push the boundary themselves.
2517 2506 # New servers won't push the boundary if the changeset already
2518 2507 # existed locally as secret
2519 2508 #
2520 2509 # We should not use 'added' here but the list of all changes in
2521 2510 # the bundle
2522 2511 if publishing:
2523 2512 phases.advanceboundary(self, phases.public, srccontent)
2524 2513 else:
2525 2514 phases.advanceboundary(self, phases.draft, srccontent)
2526 2515 phases.retractboundary(self, phases.draft, added)
2527 2516 elif srctype != 'strip':
2528 2517 # publishing only alters behavior during push
2529 2518 #
2530 2519 # strip should not touch boundary at all
2531 2520 phases.retractboundary(self, phases.draft, added)
2532 2521
2533 2522 # make changelog see real files again
2534 2523 cl.finalize(trp)
2535 2524
2536 2525 tr.close()
2537 2526
2538 2527 if changesets > 0:
2539 2528 self.updatebranchcache()
2540 2529 def runhooks():
2541 2530 # forcefully update the on-disk branch cache
2542 2531 self.ui.debug("updating the branch cache\n")
2543 2532 self.hook("changegroup", node=hex(cl.node(clstart)),
2544 2533 source=srctype, url=url)
2545 2534
2546 2535 for n in added:
2547 2536 self.hook("incoming", node=hex(n), source=srctype,
2548 2537 url=url)
2549 2538 self._afterlock(runhooks)
2550 2539
2551 2540 finally:
2552 2541 tr.release()
2553 2542 # never return 0 here:
2554 2543 if dh < 0:
2555 2544 return dh - 1
2556 2545 else:
2557 2546 return dh + 1
2558 2547
2559 2548 def stream_in(self, remote, requirements):
2560 2549 lock = self.lock()
2561 2550 try:
2562 2551 # Save remote branchmap. We will use it later
2563 2552 # to speed up branchcache creation
2564 2553 rbranchmap = None
2565 2554 if remote.capable("branchmap"):
2566 2555 rbranchmap = remote.branchmap()
2567 2556
2568 2557 fp = remote.stream_out()
2569 2558 l = fp.readline()
2570 2559 try:
2571 2560 resp = int(l)
2572 2561 except ValueError:
2573 2562 raise error.ResponseError(
2574 2563 _('unexpected response from remote server:'), l)
2575 2564 if resp == 1:
2576 2565 raise util.Abort(_('operation forbidden by server'))
2577 2566 elif resp == 2:
2578 2567 raise util.Abort(_('locking the remote repository failed'))
2579 2568 elif resp != 0:
2580 2569 raise util.Abort(_('the server sent an unknown error code'))
2581 2570 self.ui.status(_('streaming all changes\n'))
2582 2571 l = fp.readline()
2583 2572 try:
2584 2573 total_files, total_bytes = map(int, l.split(' ', 1))
2585 2574 except (ValueError, TypeError):
2586 2575 raise error.ResponseError(
2587 2576 _('unexpected response from remote server:'), l)
2588 2577 self.ui.status(_('%d files to transfer, %s of data\n') %
2589 2578 (total_files, util.bytecount(total_bytes)))
2590 2579 handled_bytes = 0
2591 2580 self.ui.progress(_('clone'), 0, total=total_bytes)
2592 2581 start = time.time()
2593 2582 for i in xrange(total_files):
2594 2583 # XXX doesn't support '\n' or '\r' in filenames
2595 2584 l = fp.readline()
2596 2585 try:
2597 2586 name, size = l.split('\0', 1)
2598 2587 size = int(size)
2599 2588 except (ValueError, TypeError):
2600 2589 raise error.ResponseError(
2601 2590 _('unexpected response from remote server:'), l)
2602 2591 if self.ui.debugflag:
2603 2592 self.ui.debug('adding %s (%s)\n' %
2604 2593 (name, util.bytecount(size)))
2605 2594 # for backwards compat, name was partially encoded
2606 2595 ofp = self.sopener(store.decodedir(name), 'w')
2607 2596 for chunk in util.filechunkiter(fp, limit=size):
2608 2597 handled_bytes += len(chunk)
2609 2598 self.ui.progress(_('clone'), handled_bytes,
2610 2599 total=total_bytes)
2611 2600 ofp.write(chunk)
2612 2601 ofp.close()
2613 2602 elapsed = time.time() - start
2614 2603 if elapsed <= 0:
2615 2604 elapsed = 0.001
2616 2605 self.ui.progress(_('clone'), None)
2617 2606 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2618 2607 (util.bytecount(total_bytes), elapsed,
2619 2608 util.bytecount(total_bytes / elapsed)))
2620 2609
2621 2610 # new requirements = old non-format requirements +
2622 2611 # new format-related
2623 2612 # requirements from the streamed-in repository
2624 2613 requirements.update(set(self.requirements) - self.supportedformats)
2625 2614 self._applyrequirements(requirements)
2626 2615 self._writerequirements()
2627 2616
2628 2617 if rbranchmap:
2629 2618 rbheads = []
2630 2619 for bheads in rbranchmap.itervalues():
2631 2620 rbheads.extend(bheads)
2632 2621
2633 2622 self.branchcache = rbranchmap
2634 2623 if rbheads:
2635 2624 rtiprev = max((int(self.changelog.rev(node))
2636 2625 for node in rbheads))
2637 self._writebranchcache(self.branchcache,
2626 branchmap.write(self, self.branchcache,
2638 2627 self[rtiprev].node(), rtiprev)
2639 2628 self.invalidate()
2640 2629 return len(self.heads()) + 1
2641 2630 finally:
2642 2631 lock.release()
2643 2632
2644 2633 def clone(self, remote, heads=[], stream=False):
2645 2634 '''clone remote repository.
2646 2635
2647 2636 keyword arguments:
2648 2637 heads: list of revs to clone (forces use of pull)
2649 2638 stream: use streaming clone if possible'''
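A usage sketch (illustrative only; repo is assumed to be a freshly created local repository and remote a peer):

# Prefer a streaming clone when no specific heads are requested and the
# server supports it; otherwise this falls back to a regular pull.
result = repo.clone(remote, heads=[], stream=True)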
2650 2639
2651 2640 # now, all clients that can request uncompressed clones can
2652 2641 # read repo formats supported by all servers that can serve
2653 2642 # them.
2654 2643
2655 2644 # if revlog format changes, client will have to check version
2656 2645 # and format flags on "stream" capability, and use
2657 2646 # uncompressed only if compatible.
2658 2647
2659 2648 if not stream:
2660 2649 # if the server explicitly prefers to stream (for fast LANs)
2661 2650 stream = remote.capable('stream-preferred')
2662 2651
2663 2652 if stream and not heads:
2664 2653 # 'stream' means remote revlog format is revlogv1 only
2665 2654 if remote.capable('stream'):
2666 2655 return self.stream_in(remote, set(('revlogv1',)))
2667 2656 # otherwise, 'streamreqs' contains the remote revlog format
2668 2657 streamreqs = remote.capable('streamreqs')
2669 2658 if streamreqs:
2670 2659 streamreqs = set(streamreqs.split(','))
2671 2660 # if we support it, stream in and adjust our requirements
2672 2661 if not streamreqs - self.supportedformats:
2673 2662 return self.stream_in(remote, streamreqs)
2674 2663 return self.pull(remote, heads)
2675 2664
2676 2665 def pushkey(self, namespace, key, old, new):
2677 2666 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2678 2667 old=old, new=new)
2679 2668 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2680 2669 ret = pushkey.push(self, namespace, key, old, new)
2681 2670 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2682 2671 ret=ret)
2683 2672 return ret
2684 2673
2685 2674 def listkeys(self, namespace):
2686 2675 self.hook('prelistkeys', throw=True, namespace=namespace)
2687 2676 self.ui.debug('listing keys for "%s"\n' % namespace)
2688 2677 values = pushkey.list(self, namespace)
2689 2678 self.hook('listkeys', namespace=namespace, values=values)
2690 2679 return values
2691 2680
2692 2681 def debugwireargs(self, one, two, three=None, four=None, five=None):
2693 2682 '''used to test argument passing over the wire'''
2694 2683 return "%s %s %s %s %s" % (one, two, three, four, five)
2695 2684
2696 2685 def savecommitmessage(self, text):
2697 2686 fp = self.opener('last-message.txt', 'wb')
2698 2687 try:
2699 2688 fp.write(text)
2700 2689 finally:
2701 2690 fp.close()
2702 2691 return self.pathto(fp.name[len(self.root) + 1:])
2703 2692
2704 2693 # used to avoid circular references so destructors work
2705 2694 def aftertrans(files):
2706 2695 renamefiles = [tuple(t) for t in files]
2707 2696 def a():
2708 2697 for src, dest in renamefiles:
2709 2698 try:
2710 2699 util.rename(src, dest)
2711 2700 except OSError: # journal file does not yet exist
2712 2701 pass
2713 2702 return a
2714 2703
2715 2704 def undoname(fn):
2716 2705 base, name = os.path.split(fn)
2717 2706 assert name.startswith('journal')
2718 2707 return os.path.join(base, name.replace('journal', 'undo', 1))
2719 2708
2720 2709 def instance(ui, path, create):
2721 2710 return localrepository(ui, util.urllocalpath(path), create)
2722 2711
2723 2712 def islocal(path):
2724 2713 return True