clfilter: ensure unfiltered repo have a filtername attribute too...
Pierre-Yves David
r18186:d336f53c default
@@ -1,2589 +1,2590 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
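The helpers above (repofilecache, storecache, unfilteredpropertycache, filteredpropertycache, unfilteredmethod) all funnel state through repo.unfiltered(), so cached values live on a single object no matter which filtered view triggered the access. A minimal standalone sketch of that redirection pattern, using illustrative names (fakerepo, countall) rather than Mercurial API:

    def unfilteredmethod(orig):
        # same shape as the decorator above: swap the receiver for its
        # unfiltered variant before dispatching
        def wrapper(repo, *args, **kwargs):
            return orig(repo.unfiltered(), *args, **kwargs)
        return wrapper

    class fakerepo(object):
        def unfiltered(self):
            return self  # a real repoview would return its backing repo

        @unfilteredmethod
        def countall(self):
            return 42    # always computed against the unfiltered view

    assert fakerepo().countall() == 42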
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return discovery.visiblebranchmap(self._repo)
95 95
96 96 def heads(self):
97 97 return discovery.visibleheads(self._repo)
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
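A hedged usage sketch for the wrapper above: open a repository and drive it through localpeer, which narrows the surface to the modern peer API. The path is illustrative:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path
    peer = repo.peer()                      # returns a localpeer
    print peer.url()                        # 'file:/path/to/repo'
    print peer.known([repo['tip'].node()])  # wire-style membership query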
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 filtername = None
150 151
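The added class attribute above is the substance of this changeset: an unfiltered localrepository now answers repo.filtername with None, matching the attribute a repoview proxy sets, so callers can test repo.filtername without hasattr() guards. A sketch of the resulting invariant ('visible' stands in for any registered filter name):

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path
    assert repo.filtername is None        # class-level default added here
    view = repo.filtered('visible')       # example repoview filter
    assert view.filtername == 'visible'   # set by the repoview proxy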
151 152 def _baserequirements(self, create):
152 153 return self.requirements[:]
153 154
154 155 def __init__(self, baseui, path=None, create=False):
155 156 self.wvfs = scmutil.vfs(path, expand=True)
156 157 self.wopener = self.wvfs
157 158 self.root = self.wvfs.base
158 159 self.path = self.wvfs.join(".hg")
159 160 self.origroot = path
160 161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
161 162 self.vfs = scmutil.vfs(self.path)
162 163 self.opener = self.vfs
163 164 self.baseui = baseui
164 165 self.ui = baseui.copy()
165 166 # A list of callbacks to shape the phases if no data were found.
166 167 # Callbacks are in the form: func(repo, roots) --> processed root.
167 168 # This list is to be filled by extensions during repo setup
168 169 self._phasedefaults = []
169 170 try:
170 171 self.ui.readconfig(self.join("hgrc"), self.root)
171 172 extensions.loadall(self.ui)
172 173 except IOError:
173 174 pass
174 175
175 176 if not self.vfs.isdir():
176 177 if create:
177 178 if not self.wvfs.exists():
178 179 self.wvfs.makedirs()
179 180 self.vfs.makedir(notindexed=True)
180 181 requirements = self._baserequirements(create)
181 182 if self.ui.configbool('format', 'usestore', True):
182 183 self.vfs.mkdir("store")
183 184 requirements.append("store")
184 185 if self.ui.configbool('format', 'usefncache', True):
185 186 requirements.append("fncache")
186 187 if self.ui.configbool('format', 'dotencode', True):
187 188 requirements.append('dotencode')
188 189 # create an invalid changelog
189 190 self.vfs.append(
190 191 "00changelog.i",
191 192 '\0\0\0\2' # represents revlogv2
192 193 ' dummy changelog to prevent using the old repo layout'
193 194 )
194 195 if self.ui.configbool('format', 'generaldelta', False):
195 196 requirements.append("generaldelta")
196 197 requirements = set(requirements)
197 198 else:
198 199 raise error.RepoError(_("repository %s not found") % path)
199 200 elif create:
200 201 raise error.RepoError(_("repository %s already exists") % path)
201 202 else:
202 203 try:
203 204 requirements = scmutil.readrequires(self.vfs, self.supported)
204 205 except IOError, inst:
205 206 if inst.errno != errno.ENOENT:
206 207 raise
207 208 requirements = set()
208 209
209 210 self.sharedpath = self.path
210 211 try:
211 212 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
212 213 if not os.path.exists(s):
213 214 raise error.RepoError(
214 215 _('.hg/sharedpath points to nonexistent directory %s') % s)
215 216 self.sharedpath = s
216 217 except IOError, inst:
217 218 if inst.errno != errno.ENOENT:
218 219 raise
219 220
220 221 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
221 222 self.spath = self.store.path
222 223 self.svfs = self.store.vfs
223 224 self.sopener = self.svfs
224 225 self.sjoin = self.store.join
225 226 self.vfs.createmode = self.store.createmode
226 227 self._applyrequirements(requirements)
227 228 if create:
228 229 self._writerequirements()
229 230
230 231
231 232 self._branchcache = None
232 233 self.filterpats = {}
233 234 self._datafilters = {}
234 235 self._transref = self._lockref = self._wlockref = None
235 236
236 237 # A cache for various files under .hg/ that tracks file changes,
237 238 # (used by the filecache decorator)
238 239 #
239 240 # Maps a property name to its util.filecacheentry
240 241 self._filecache = {}
241 242
242 243 # hold sets of revision to be filtered
243 244 # should be cleared when something might have changed the filter value:
244 245 # - new changesets,
245 246 # - phase change,
246 247 # - new obsolescence marker,
247 248 # - working directory parent change,
248 249 # - bookmark changes
249 250 self.filteredrevcache = {}
250 251
251 252 def close(self):
252 253 pass
253 254
254 255 def _restrictcapabilities(self, caps):
255 256 return caps
256 257
257 258 def _applyrequirements(self, requirements):
258 259 self.requirements = requirements
259 260 self.sopener.options = dict((r, 1) for r in requirements
260 261 if r in self.openerreqs)
261 262
262 263 def _writerequirements(self):
263 264 reqfile = self.opener("requires", "w")
264 265 for r in self.requirements:
265 266 reqfile.write("%s\n" % r)
266 267 reqfile.close()
267 268
268 269 def _checknested(self, path):
269 270 """Determine if path is a legal nested repository."""
270 271 if not path.startswith(self.root):
271 272 return False
272 273 subpath = path[len(self.root) + 1:]
273 274 normsubpath = util.pconvert(subpath)
274 275
275 276 # XXX: Checking against the current working copy is wrong in
276 277 # the sense that it can reject things like
277 278 #
278 279 # $ hg cat -r 10 sub/x.txt
279 280 #
280 281 # if sub/ is no longer a subrepository in the working copy
281 282 # parent revision.
282 283 #
283 284 # However, it can of course also allow things that would have
284 285 # been rejected before, such as the above cat command if sub/
285 286 # is a subrepository now, but was a normal directory before.
286 287 # The old path auditor would have rejected by mistake since it
287 288 # panics when it sees sub/.hg/.
288 289 #
289 290 # All in all, checking against the working copy seems sensible
290 291 # since we want to prevent access to nested repositories on
291 292 # the filesystem *now*.
292 293 ctx = self[None]
293 294 parts = util.splitpath(subpath)
294 295 while parts:
295 296 prefix = '/'.join(parts)
296 297 if prefix in ctx.substate:
297 298 if prefix == normsubpath:
298 299 return True
299 300 else:
300 301 sub = ctx.sub(prefix)
301 302 return sub.checknested(subpath[len(prefix) + 1:])
302 303 else:
303 304 parts.pop()
304 305 return False
305 306
306 307 def peer(self):
307 308 return localpeer(self) # not cached to avoid reference cycle
308 309
309 310 def unfiltered(self):
310 311 """Return unfiltered version of the repository
311 312
312 313 Intended to be overwritten by filtered repos."""
313 314 return self
314 315
315 316 def filtered(self, name):
316 317 """Return a filtered version of a repository"""
317 318 # build a new class with the mixin and the current class
318 319 # (possibly a subclass of the repo)
319 320 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 321 pass
321 322 return proxycls(self, name)
322 323
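Because proxycls mixes repoview.repoview into the repository's own concrete class (which may itself be an extension subclass), isinstance checks keep working on the filtered view and unfiltered() round-trips to the original object. A small sketch under the same assumptions as above:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path
    view = repo.filtered('visible')          # example repoview filter
    assert isinstance(view, repo.__class__)  # mixin keeps the concrete type
    assert view.unfiltered() is repo         # proxy delegates to the real repo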
323 324 @repofilecache('bookmarks')
324 325 def _bookmarks(self):
325 326 return bookmarks.bmstore(self)
326 327
327 328 @repofilecache('bookmarks.current')
328 329 def _bookmarkcurrent(self):
329 330 return bookmarks.readcurrent(self)
330 331
331 332 def bookmarkheads(self, bookmark):
332 333 name = bookmark.split('@', 1)[0]
333 334 heads = []
334 335 for mark, n in self._bookmarks.iteritems():
335 336 if mark.split('@', 1)[0] == name:
336 337 heads.append(n)
337 338 return heads
338 339
339 340 @storecache('phaseroots')
340 341 def _phasecache(self):
341 342 return phases.phasecache(self, self._phasedefaults)
342 343
343 344 @storecache('obsstore')
344 345 def obsstore(self):
345 346 store = obsolete.obsstore(self.sopener)
346 347 if store and not obsolete._enabled:
347 348 # message is rare enough to not be translated
348 349 msg = 'obsolete feature not enabled but %i markers found!\n'
349 350 self.ui.warn(msg % len(list(store)))
350 351 return store
351 352
352 353 @unfilteredpropertycache
353 354 def hiddenrevs(self):
354 355 """hiddenrevs: revs that should be hidden by command and tools
355 356
356 357 This set is carried on the repo to ease initialization and lazy
357 358 loading; it'll probably move back to changelog for efficiency and
358 359 consistency reasons.
359 360
360 361 Note that hiddenrevs will need invalidation when:
361 362 - a new changeset is added (possibly unstable above extinct)
362 363 - a new obsolescence marker is added (possibly a new extinct changeset)
363 364
364 365 hidden changesets cannot have non-hidden descendants
365 366 """
366 367 hidden = set()
367 368 if self.obsstore:
368 369 ### hide extinct changesets that are not accessible by any means
369 370 hiddenquery = 'extinct() - ::(. + bookmark())'
370 371 hidden.update(self.revs(hiddenquery))
371 372 return hidden
372 373
373 374 @storecache('00changelog.i')
374 375 def changelog(self):
375 376 c = changelog.changelog(self.sopener)
376 377 if 'HG_PENDING' in os.environ:
377 378 p = os.environ['HG_PENDING']
378 379 if p.startswith(self.root):
379 380 c.readpending('00changelog.i.a')
380 381 return c
381 382
382 383 @storecache('00manifest.i')
383 384 def manifest(self):
384 385 return manifest.manifest(self.sopener)
385 386
386 387 @repofilecache('dirstate')
387 388 def dirstate(self):
388 389 warned = [0]
389 390 def validate(node):
390 391 try:
391 392 self.changelog.rev(node)
392 393 return node
393 394 except error.LookupError:
394 395 if not warned[0]:
395 396 warned[0] = True
396 397 self.ui.warn(_("warning: ignoring unknown"
397 398 " working parent %s!\n") % short(node))
398 399 return nullid
399 400
400 401 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 402
402 403 def __getitem__(self, changeid):
403 404 if changeid is None:
404 405 return context.workingctx(self)
405 406 return context.changectx(self, changeid)
406 407
407 408 def __contains__(self, changeid):
408 409 try:
409 410 return bool(self.lookup(changeid))
410 411 except error.RepoLookupError:
411 412 return False
412 413
413 414 def __nonzero__(self):
414 415 return True
415 416
416 417 def __len__(self):
417 418 return len(self.changelog)
418 419
419 420 def __iter__(self):
420 421 return iter(self.changelog)
421 422
422 423 def revs(self, expr, *args):
423 424 '''Return a list of revisions matching the given revset'''
424 425 expr = revset.formatspec(expr, *args)
425 426 m = revset.match(None, expr)
426 427 return [r for r in m(self, list(self))]
427 428
428 429 def set(self, expr, *args):
429 430 '''
430 431 Yield a context for each matching revision, after doing arg
431 432 replacement via revset.formatspec
432 433 '''
433 434 for r in self.revs(expr, *args):
434 435 yield self[r]
435 436
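A hedged sketch of the two query entry points just defined: revs() returns integer revisions, set() yields changectx objects, and formatspec quotes each argument so callers never splice strings into a revset by hand. The queries themselves are illustrative:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path
    for rev in repo.revs('branch(%s) and not merge()', 'default'):
        print rev
    for ctx in repo.set('limit(heads(all()), %d)', 3):
        print ctx.hex()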
436 437 def url(self):
437 438 return 'file:' + self.root
438 439
439 440 def hook(self, name, throw=False, **args):
440 441 return hook.hook(self.ui, self, name, throw, **args)
441 442
442 443 @unfilteredmethod
443 444 def _tag(self, names, node, message, local, user, date, extra={}):
444 445 if isinstance(names, str):
445 446 names = (names,)
446 447
447 448 branches = self.branchmap()
448 449 for name in names:
449 450 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 451 local=local)
451 452 if name in branches:
452 453 self.ui.warn(_("warning: tag %s conflicts with existing"
453 454 " branch name\n") % name)
454 455
455 456 def writetags(fp, names, munge, prevtags):
456 457 fp.seek(0, 2)
457 458 if prevtags and prevtags[-1] != '\n':
458 459 fp.write('\n')
459 460 for name in names:
460 461 m = munge and munge(name) or name
461 462 if (self._tagscache.tagtypes and
462 463 name in self._tagscache.tagtypes):
463 464 old = self.tags().get(name, nullid)
464 465 fp.write('%s %s\n' % (hex(old), m))
465 466 fp.write('%s %s\n' % (hex(node), m))
466 467 fp.close()
467 468
468 469 prevtags = ''
469 470 if local:
470 471 try:
471 472 fp = self.opener('localtags', 'r+')
472 473 except IOError:
473 474 fp = self.opener('localtags', 'a')
474 475 else:
475 476 prevtags = fp.read()
476 477
477 478 # local tags are stored in the current charset
478 479 writetags(fp, names, None, prevtags)
479 480 for name in names:
480 481 self.hook('tag', node=hex(node), tag=name, local=local)
481 482 return
482 483
483 484 try:
484 485 fp = self.wfile('.hgtags', 'rb+')
485 486 except IOError, e:
486 487 if e.errno != errno.ENOENT:
487 488 raise
488 489 fp = self.wfile('.hgtags', 'ab')
489 490 else:
490 491 prevtags = fp.read()
491 492
492 493 # committed tags are stored in UTF-8
493 494 writetags(fp, names, encoding.fromlocal, prevtags)
494 495
495 496 fp.close()
496 497
497 498 self.invalidatecaches()
498 499
499 500 if '.hgtags' not in self.dirstate:
500 501 self[None].add(['.hgtags'])
501 502
502 503 m = matchmod.exact(self.root, '', ['.hgtags'])
503 504 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 505
505 506 for name in names:
506 507 self.hook('tag', node=hex(node), tag=name, local=local)
507 508
508 509 return tagnode
509 510
510 511 def tag(self, names, node, message, local, user, date):
511 512 '''tag a revision with one or more symbolic names.
512 513
513 514 names is a list of strings or, when adding a single tag, names may be a
514 515 string.
515 516
516 517 if local is True, the tags are stored in a per-repository file.
517 518 otherwise, they are stored in the .hgtags file, and a new
518 519 changeset is committed with the change.
519 520
520 521 keyword arguments:
521 522
522 523 local: whether to store tags in non-version-controlled file
523 524 (default False)
524 525
525 526 message: commit message to use if committing
526 527
527 528 user: name of user to use if committing
528 529
529 530 date: date tuple to use if committing'''
530 531
531 532 if not local:
532 533 for x in self.status()[:5]:
533 534 if '.hgtags' in x:
534 535 raise util.Abort(_('working copy of .hgtags is changed '
535 536 '(please commit .hgtags manually)'))
536 537
537 538 self.tags() # instantiate the cache
538 539 self._tag(names, node, message, local, user, date)
539 540
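Two hedged example calls matching the tag() signature documented above; names and message are illustrative, and passing None for user/date lets the usual commit defaults apply:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path
    node = repo['tip'].node()
    # global tag: writes .hgtags and commits a new changeset
    repo.tag('v1.0', node, 'Added tag v1.0', False, None, None)
    # local tags: stored in .hg/localtags, no commit
    repo.tag(['wip', 'review'], node, '', True, None, None)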
540 541 @filteredpropertycache
541 542 def _tagscache(self):
542 543 '''Returns a tagscache object that contains various tags related
543 544 caches.'''
544 545
545 546 # This simplifies its cache management by having one decorated
546 547 # function (this one) and the rest simply fetch things from it.
547 548 class tagscache(object):
548 549 def __init__(self):
549 550 # These two define the set of tags for this repository. tags
550 551 # maps tag name to node; tagtypes maps tag name to 'global' or
551 552 # 'local'. (Global tags are defined by .hgtags across all
552 553 # heads, and local tags are defined in .hg/localtags.)
553 554 # They constitute the in-memory cache of tags.
554 555 self.tags = self.tagtypes = None
555 556
556 557 self.nodetagscache = self.tagslist = None
557 558
558 559 cache = tagscache()
559 560 cache.tags, cache.tagtypes = self._findtags()
560 561
561 562 return cache
562 563
563 564 def tags(self):
564 565 '''return a mapping of tag to node'''
565 566 t = {}
566 567 if self.changelog.filteredrevs:
567 568 tags, tt = self._findtags()
568 569 else:
569 570 tags = self._tagscache.tags
570 571 for k, v in tags.iteritems():
571 572 try:
572 573 # ignore tags to unknown nodes
573 574 self.changelog.rev(v)
574 575 t[k] = v
575 576 except (error.LookupError, ValueError):
576 577 pass
577 578 return t
578 579
579 580 def _findtags(self):
580 581 '''Do the hard work of finding tags. Return a pair of dicts
581 582 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 583 maps tag name to a string like \'global\' or \'local\'.
583 584 Subclasses or extensions are free to add their own tags, but
584 585 should be aware that the returned dicts will be retained for the
585 586 duration of the localrepo object.'''
586 587
587 588 # XXX what tagtype should subclasses/extensions use? Currently
588 589 # mq and bookmarks add tags, but do not set the tagtype at all.
589 590 # Should each extension invent its own tag type? Should there
590 591 # be one tagtype for all such "virtual" tags? Or is the status
591 592 # quo fine?
592 593
593 594 alltags = {} # map tag name to (node, hist)
594 595 tagtypes = {}
595 596
596 597 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 598 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 599
599 600 # Build the return dicts. Have to re-encode tag names because
600 601 # the tags module always uses UTF-8 (in order not to lose info
601 602 # writing to the cache), but the rest of Mercurial wants them in
602 603 # local encoding.
603 604 tags = {}
604 605 for (name, (node, hist)) in alltags.iteritems():
605 606 if node != nullid:
606 607 tags[encoding.tolocal(name)] = node
607 608 tags['tip'] = self.changelog.tip()
608 609 tagtypes = dict([(encoding.tolocal(name), value)
609 610 for (name, value) in tagtypes.iteritems()])
610 611 return (tags, tagtypes)
611 612
612 613 def tagtype(self, tagname):
613 614 '''
614 615 return the type of the given tag. result can be:
615 616
616 617 'local' : a local tag
617 618 'global' : a global tag
618 619 None : tag does not exist
619 620 '''
620 621
621 622 return self._tagscache.tagtypes.get(tagname)
622 623
623 624 def tagslist(self):
624 625 '''return a list of tags ordered by revision'''
625 626 if not self._tagscache.tagslist:
626 627 l = []
627 628 for t, n in self.tags().iteritems():
628 629 r = self.changelog.rev(n)
629 630 l.append((r, t, n))
630 631 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 632
632 633 return self._tagscache.tagslist
633 634
634 635 def nodetags(self, node):
635 636 '''return the tags associated with a node'''
636 637 if not self._tagscache.nodetagscache:
637 638 nodetagscache = {}
638 639 for t, n in self._tagscache.tags.iteritems():
639 640 nodetagscache.setdefault(n, []).append(t)
640 641 for tags in nodetagscache.itervalues():
641 642 tags.sort()
642 643 self._tagscache.nodetagscache = nodetagscache
643 644 return self._tagscache.nodetagscache.get(node, [])
644 645
645 646 def nodebookmarks(self, node):
646 647 marks = []
647 648 for bookmark, n in self._bookmarks.iteritems():
648 649 if n == node:
649 650 marks.append(bookmark)
650 651 return sorted(marks)
651 652
652 653 def _cacheabletip(self):
653 654 """tip-most revision stable enought to used in persistent cache
654 655
655 656 This function is overwritten by MQ to ensure we do not write the cache for
656 657 a part of the history that will likely change.
657 658
658 659 Efficient handling of filtered revisions in branchcache should offer a
659 660 better alternative. But we are using this approach until it is ready.
660 661 """
661 662 cl = self.changelog
662 663 return cl.rev(cl.tip())
663 664
664 665 def branchmap(self):
665 666 '''returns a dictionary {branch: [branchheads]}'''
666 667 if self.changelog.filteredrevs:
667 668 # some changesets are excluded; we can't use the cache
668 669 bmap = branchmap.branchcache()
669 670 bmap.update(self, (self[r] for r in self))
670 671 return bmap
671 672 else:
672 673 branchmap.updatecache(self)
673 674 return self._branchcache
674 675
675 676
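An illustrative walk over the {branch: [branchheads]} mapping returned above (branchcache behaves like a dict here), plus the branchtip() convenience defined just below:

    from mercurial import ui as uimod, hg
    from mercurial.node import hex

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path
    for branch, heads in repo.branchmap().iteritems():
        print branch, [hex(n)[:12] for n in heads]  # heads, lowest to highest
    print hex(repo.branchtip('default'))            # tip-most open head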
676 677 def _branchtip(self, heads):
677 678 '''return the tipmost branch head in heads'''
678 679 tip = heads[-1]
679 680 for h in reversed(heads):
680 681 if not self[h].closesbranch():
681 682 tip = h
682 683 break
683 684 return tip
684 685
685 686 def branchtip(self, branch):
686 687 '''return the tip node for a given branch'''
687 688 if branch not in self.branchmap():
688 689 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
689 690 return self._branchtip(self.branchmap()[branch])
690 691
691 692 def branchtags(self):
692 693 '''return a dict where branch names map to the tipmost head of
693 694 the branch; open heads come before closed'''
694 695 bt = {}
695 696 for bn, heads in self.branchmap().iteritems():
696 697 bt[bn] = self._branchtip(heads)
697 698 return bt
698 699
699 700 def lookup(self, key):
700 701 return self[key].node()
701 702
702 703 def lookupbranch(self, key, remote=None):
703 704 repo = remote or self
704 705 if key in repo.branchmap():
705 706 return key
706 707
707 708 repo = (remote and remote.local()) and remote or self
708 709 return repo[key].branch()
709 710
710 711 def known(self, nodes):
711 712 nm = self.changelog.nodemap
712 713 pc = self._phasecache
713 714 result = []
714 715 for n in nodes:
715 716 r = nm.get(n)
716 717 resp = not (r is None or pc.phase(self, r) >= phases.secret)
717 718 result.append(resp)
718 719 return result
719 720
720 721 def local(self):
721 722 return self
722 723
723 724 def cancopy(self):
724 725 return self.local() # so statichttprepo's override of local() works
725 726
726 727 def join(self, f):
727 728 return os.path.join(self.path, f)
728 729
729 730 def wjoin(self, f):
730 731 return os.path.join(self.root, f)
731 732
732 733 def file(self, f):
733 734 if f[0] == '/':
734 735 f = f[1:]
735 736 return filelog.filelog(self.sopener, f)
736 737
737 738 def changectx(self, changeid):
738 739 return self[changeid]
739 740
740 741 def parents(self, changeid=None):
741 742 '''get list of changectxs for parents of changeid'''
742 743 return self[changeid].parents()
743 744
744 745 def setparents(self, p1, p2=nullid):
745 746 copies = self.dirstate.setparents(p1, p2)
746 747 if copies:
747 748 # Adjust copy records, the dirstate cannot do it, it
748 749 # requires access to parents manifests. Preserve them
749 750 # only for entries added to first parent.
750 751 pctx = self[p1]
751 752 for f in copies:
752 753 if f not in pctx and copies[f] in pctx:
753 754 self.dirstate.copy(copies[f], f)
754 755
755 756 def filectx(self, path, changeid=None, fileid=None):
756 757 """changeid can be a changeset revision, node, or tag.
757 758 fileid can be a file revision or node."""
758 759 return context.filectx(self, path, changeid, fileid)
759 760
760 761 def getcwd(self):
761 762 return self.dirstate.getcwd()
762 763
763 764 def pathto(self, f, cwd=None):
764 765 return self.dirstate.pathto(f, cwd)
765 766
766 767 def wfile(self, f, mode='r'):
767 768 return self.wopener(f, mode)
768 769
769 770 def _link(self, f):
770 771 return os.path.islink(self.wjoin(f))
771 772
772 773 def _loadfilter(self, filter):
773 774 if filter not in self.filterpats:
774 775 l = []
775 776 for pat, cmd in self.ui.configitems(filter):
776 777 if cmd == '!':
777 778 continue
778 779 mf = matchmod.match(self.root, '', [pat])
779 780 fn = None
780 781 params = cmd
781 782 for name, filterfn in self._datafilters.iteritems():
782 783 if cmd.startswith(name):
783 784 fn = filterfn
784 785 params = cmd[len(name):].lstrip()
785 786 break
786 787 if not fn:
787 788 fn = lambda s, c, **kwargs: util.filter(s, c)
788 789 # Wrap old filters not supporting keyword arguments
789 790 if not inspect.getargspec(fn)[2]:
790 791 oldfn = fn
791 792 fn = lambda s, c, **kwargs: oldfn(s, c)
792 793 l.append((mf, fn, params))
793 794 self.filterpats[filter] = l
794 795 return self.filterpats[filter]
795 796
796 797 def _filter(self, filterpats, filename, data):
797 798 for mf, fn, cmd in filterpats:
798 799 if mf(filename):
799 800 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
800 801 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
801 802 break
802 803
803 804 return data
804 805
805 806 @unfilteredpropertycache
806 807 def _encodefilterpats(self):
807 808 return self._loadfilter('encode')
808 809
809 810 @unfilteredpropertycache
810 811 def _decodefilterpats(self):
811 812 return self._loadfilter('decode')
812 813
813 814 def adddatafilter(self, name, filter):
814 815 self._datafilters[name] = filter
815 816
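How the filtering pieces fit together: adddatafilter() registers a named filter function, and any [encode]/[decode] configuration value that starts with that name routes file data through it (see _loadfilter above). The filter name 'upper:' and its behavior are purely illustrative:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path

    def upperfilter(s, params, **kwargs):
        # s is the file data; params is whatever followed the filter name
        # in hgrc; kwargs carries ui/repo/filename as passed by _filter()
        return s.upper()

    repo.adddatafilter('upper:', upperfilter)
    # matching hgrc configuration:
    #   [encode]
    #   **.txt = upper: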
816 817 def wread(self, filename):
817 818 if self._link(filename):
818 819 data = os.readlink(self.wjoin(filename))
819 820 else:
820 821 data = self.wopener.read(filename)
821 822 return self._filter(self._encodefilterpats, filename, data)
822 823
823 824 def wwrite(self, filename, data, flags):
824 825 data = self._filter(self._decodefilterpats, filename, data)
825 826 if 'l' in flags:
826 827 self.wopener.symlink(data, filename)
827 828 else:
828 829 self.wopener.write(filename, data)
829 830 if 'x' in flags:
830 831 util.setflags(self.wjoin(filename), False, True)
831 832
832 833 def wwritedata(self, filename, data):
833 834 return self._filter(self._decodefilterpats, filename, data)
834 835
835 836 def transaction(self, desc):
836 837 tr = self._transref and self._transref() or None
837 838 if tr and tr.running():
838 839 return tr.nest()
839 840
840 841 # abort here if the journal already exists
841 842 if os.path.exists(self.sjoin("journal")):
842 843 raise error.RepoError(
843 844 _("abandoned transaction found - run hg recover"))
844 845
845 846 self._writejournal(desc)
846 847 renames = [(x, undoname(x)) for x in self._journalfiles()]
847 848
848 849 tr = transaction.transaction(self.ui.warn, self.sopener,
849 850 self.sjoin("journal"),
850 851 aftertrans(renames),
851 852 self.store.createmode)
852 853 self._transref = weakref.ref(tr)
853 854 return tr
854 855
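The transaction discipline used throughout this file, sketched in isolation: take the store lock, open a transaction, close() only on success, and release() unconditionally so an aborted journal is rolled back:

    from mercurial import ui as uimod, hg

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path
    lock = repo.lock()
    try:
        tr = repo.transaction('example-operation')  # desc is illustrative
        try:
            # ... append store data via weakref.proxy(tr), as commitctx does ...
            tr.close()     # success: the journal becomes the undo files
        finally:
            tr.release()   # no-op after close(); otherwise rolls back
    finally:
        lock.release()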
855 856 def _journalfiles(self):
856 857 return (self.sjoin('journal'), self.join('journal.dirstate'),
857 858 self.join('journal.branch'), self.join('journal.desc'),
858 859 self.join('journal.bookmarks'),
859 860 self.sjoin('journal.phaseroots'))
860 861
861 862 def undofiles(self):
862 863 return [undoname(x) for x in self._journalfiles()]
863 864
864 865 def _writejournal(self, desc):
865 866 self.opener.write("journal.dirstate",
866 867 self.opener.tryread("dirstate"))
867 868 self.opener.write("journal.branch",
868 869 encoding.fromlocal(self.dirstate.branch()))
869 870 self.opener.write("journal.desc",
870 871 "%d\n%s\n" % (len(self), desc))
871 872 self.opener.write("journal.bookmarks",
872 873 self.opener.tryread("bookmarks"))
873 874 self.sopener.write("journal.phaseroots",
874 875 self.sopener.tryread("phaseroots"))
875 876
876 877 def recover(self):
877 878 lock = self.lock()
878 879 try:
879 880 if os.path.exists(self.sjoin("journal")):
880 881 self.ui.status(_("rolling back interrupted transaction\n"))
881 882 transaction.rollback(self.sopener, self.sjoin("journal"),
882 883 self.ui.warn)
883 884 self.invalidate()
884 885 return True
885 886 else:
886 887 self.ui.warn(_("no interrupted transaction available\n"))
887 888 return False
888 889 finally:
889 890 lock.release()
890 891
891 892 def rollback(self, dryrun=False, force=False):
892 893 wlock = lock = None
893 894 try:
894 895 wlock = self.wlock()
895 896 lock = self.lock()
896 897 if os.path.exists(self.sjoin("undo")):
897 898 return self._rollback(dryrun, force)
898 899 else:
899 900 self.ui.warn(_("no rollback information available\n"))
900 901 return 1
901 902 finally:
902 903 release(lock, wlock)
903 904
904 905 @unfilteredmethod # Until we get smarter cache management
905 906 def _rollback(self, dryrun, force):
906 907 ui = self.ui
907 908 try:
908 909 args = self.opener.read('undo.desc').splitlines()
909 910 (oldlen, desc, detail) = (int(args[0]), args[1], None)
910 911 if len(args) >= 3:
911 912 detail = args[2]
912 913 oldtip = oldlen - 1
913 914
914 915 if detail and ui.verbose:
915 916 msg = (_('repository tip rolled back to revision %s'
916 917 ' (undo %s: %s)\n')
917 918 % (oldtip, desc, detail))
918 919 else:
919 920 msg = (_('repository tip rolled back to revision %s'
920 921 ' (undo %s)\n')
921 922 % (oldtip, desc))
922 923 except IOError:
923 924 msg = _('rolling back unknown transaction\n')
924 925 desc = None
925 926
926 927 if not force and self['.'] != self['tip'] and desc == 'commit':
927 928 raise util.Abort(
928 929 _('rollback of last commit while not checked out '
929 930 'may lose data'), hint=_('use -f to force'))
930 931
931 932 ui.status(msg)
932 933 if dryrun:
933 934 return 0
934 935
935 936 parents = self.dirstate.parents()
936 937 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
937 938 if os.path.exists(self.join('undo.bookmarks')):
938 939 util.rename(self.join('undo.bookmarks'),
939 940 self.join('bookmarks'))
940 941 if os.path.exists(self.sjoin('undo.phaseroots')):
941 942 util.rename(self.sjoin('undo.phaseroots'),
942 943 self.sjoin('phaseroots'))
943 944 self.invalidate()
944 945
945 946 # Discard all cache entries to force reloading everything.
946 947 self._filecache.clear()
947 948
948 949 parentgone = (parents[0] not in self.changelog.nodemap or
949 950 parents[1] not in self.changelog.nodemap)
950 951 if parentgone:
951 952 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
952 953 try:
953 954 branch = self.opener.read('undo.branch')
954 955 self.dirstate.setbranch(encoding.tolocal(branch))
955 956 except IOError:
956 957 ui.warn(_('named branch could not be reset: '
957 958 'current branch is still \'%s\'\n')
958 959 % self.dirstate.branch())
959 960
960 961 self.dirstate.invalidate()
961 962 parents = tuple([p.rev() for p in self.parents()])
962 963 if len(parents) > 1:
963 964 ui.status(_('working directory now based on '
964 965 'revisions %d and %d\n') % parents)
965 966 else:
966 967 ui.status(_('working directory now based on '
967 968 'revision %d\n') % parents)
968 969 # TODO: if we know which new heads may result from this rollback, pass
969 970 # them to destroy(), which will prevent the branchhead cache from being
970 971 # invalidated.
971 972 self.destroyed()
972 973 return 0
973 974
974 975 def invalidatecaches(self):
975 976
976 977 if '_tagscache' in vars(self):
977 978 # can't use delattr on proxy
978 979 del self.__dict__['_tagscache']
979 980
980 981 self.unfiltered()._branchcache = None # in UTF-8
981 982 self.invalidatevolatilesets()
982 983
983 984 def invalidatevolatilesets(self):
984 985 self.filteredrevcache.clear()
985 986 obsolete.clearobscaches(self)
986 987 if 'hiddenrevs' in vars(self):
987 988 del self.hiddenrevs
988 989
989 990 def invalidatedirstate(self):
990 991 '''Invalidates the dirstate, causing the next call to dirstate
991 992 to check if it was modified since the last time it was read,
992 993 rereading it if it has.
993 994
994 995 This is different from dirstate.invalidate() in that it doesn't always
995 996 reread the dirstate. Use dirstate.invalidate() if you want to
996 997 explicitly read the dirstate again (i.e. restoring it to a previous
997 998 known good state).'''
998 999 if hasunfilteredcache(self, 'dirstate'):
999 1000 for k in self.dirstate._filecache:
1000 1001 try:
1001 1002 delattr(self.dirstate, k)
1002 1003 except AttributeError:
1003 1004 pass
1004 1005 delattr(self.unfiltered(), 'dirstate')
1005 1006
1006 1007 def invalidate(self):
1007 1008 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1008 1009 for k in self._filecache:
1009 1010 # dirstate is invalidated separately in invalidatedirstate()
1010 1011 if k == 'dirstate':
1011 1012 continue
1012 1013
1013 1014 try:
1014 1015 delattr(unfiltered, k)
1015 1016 except AttributeError:
1016 1017 pass
1017 1018 self.invalidatecaches()
1018 1019
1019 1020 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1020 1021 try:
1021 1022 l = lock.lock(lockname, 0, releasefn, desc=desc)
1022 1023 except error.LockHeld, inst:
1023 1024 if not wait:
1024 1025 raise
1025 1026 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1026 1027 (desc, inst.locker))
1027 1028 # default to 600 seconds timeout
1028 1029 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1029 1030 releasefn, desc=desc)
1030 1031 if acquirefn:
1031 1032 acquirefn()
1032 1033 return l
1033 1034
1034 1035 def _afterlock(self, callback):
1035 1036 """add a callback to the current repository lock.
1036 1037
1037 1038 The callback will be executed on lock release."""
1038 1039 l = self._lockref and self._lockref()
1039 1040 if l:
1040 1041 l.postrelease.append(callback)
1041 1042 else:
1042 1043 callback()
1043 1044
1044 1045 def lock(self, wait=True):
1045 1046 '''Lock the repository store (.hg/store) and return a weak reference
1046 1047 to the lock. Use this before modifying the store (e.g. committing or
1047 1048 stripping). If you are opening a transaction, get a lock as well.'''
1048 1049 l = self._lockref and self._lockref()
1049 1050 if l is not None and l.held:
1050 1051 l.lock()
1051 1052 return l
1052 1053
1053 1054 def unlock():
1054 1055 self.store.write()
1055 1056 if hasunfilteredcache(self, '_phasecache'):
1056 1057 self._phasecache.write()
1057 1058 for k, ce in self._filecache.items():
1058 1059 if k == 'dirstate':
1059 1060 continue
1060 1061 ce.refresh()
1061 1062
1062 1063 l = self._lock(self.sjoin("lock"), wait, unlock,
1063 1064 self.invalidate, _('repository %s') % self.origroot)
1064 1065 self._lockref = weakref.ref(l)
1065 1066 return l
1066 1067
1067 1068 def wlock(self, wait=True):
1068 1069 '''Lock the non-store parts of the repository (everything under
1069 1070 .hg except .hg/store) and return a weak reference to the lock.
1070 1071 Use this before modifying files in .hg.'''
1071 1072 l = self._wlockref and self._wlockref()
1072 1073 if l is not None and l.held:
1073 1074 l.lock()
1074 1075 return l
1075 1076
1076 1077 def unlock():
1077 1078 self.dirstate.write()
1078 1079 ce = self._filecache.get('dirstate')
1079 1080 if ce:
1080 1081 ce.refresh()
1081 1082
1082 1083 l = self._lock(self.join("wlock"), wait, unlock,
1083 1084 self.invalidatedirstate, _('working directory of %s') %
1084 1085 self.origroot)
1085 1086 self._wlockref = weakref.ref(l)
1086 1087 return l
1087 1088
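The lock-ordering convention visible in rollback() and commit(): take wlock (working directory) before lock (store) and release in reverse order; mercurial.lock.release() tolerates None entries, which keeps error paths short:

    from mercurial import ui as uimod, hg
    from mercurial.lock import release

    repo = hg.repository(uimod.ui(), '/path/to/repo')  # illustrative path
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # ... mutate store and working-directory state ...
    finally:
        release(lock, wlock)  # store lock freed first, then wlock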
1088 1089 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1089 1090 """
1090 1091 commit an individual file as part of a larger transaction
1091 1092 """
1092 1093
1093 1094 fname = fctx.path()
1094 1095 text = fctx.data()
1095 1096 flog = self.file(fname)
1096 1097 fparent1 = manifest1.get(fname, nullid)
1097 1098 fparent2 = fparent2o = manifest2.get(fname, nullid)
1098 1099
1099 1100 meta = {}
1100 1101 copy = fctx.renamed()
1101 1102 if copy and copy[0] != fname:
1102 1103 # Mark the new revision of this file as a copy of another
1103 1104 # file. This copy data will effectively act as a parent
1104 1105 # of this new revision. If this is a merge, the first
1105 1106 # parent will be the nullid (meaning "look up the copy data")
1106 1107 # and the second one will be the other parent. For example:
1107 1108 #
1108 1109 # 0 --- 1 --- 3 rev1 changes file foo
1109 1110 # \ / rev2 renames foo to bar and changes it
1110 1111 # \- 2 -/ rev3 should have bar with all changes and
1111 1112 # should record that bar descends from
1112 1113 # bar in rev2 and foo in rev1
1113 1114 #
1114 1115 # this allows this merge to succeed:
1115 1116 #
1116 1117 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1117 1118 # \ / merging rev3 and rev4 should use bar@rev2
1118 1119 # \- 2 --- 4 as the merge base
1119 1120 #
1120 1121
1121 1122 cfname = copy[0]
1122 1123 crev = manifest1.get(cfname)
1123 1124 newfparent = fparent2
1124 1125
1125 1126 if manifest2: # branch merge
1126 1127 if fparent2 == nullid or crev is None: # copied on remote side
1127 1128 if cfname in manifest2:
1128 1129 crev = manifest2[cfname]
1129 1130 newfparent = fparent1
1130 1131
1131 1132 # find source in nearest ancestor if we've lost track
1132 1133 if not crev:
1133 1134 self.ui.debug(" %s: searching for copy revision for %s\n" %
1134 1135 (fname, cfname))
1135 1136 for ancestor in self[None].ancestors():
1136 1137 if cfname in ancestor:
1137 1138 crev = ancestor[cfname].filenode()
1138 1139 break
1139 1140
1140 1141 if crev:
1141 1142 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1142 1143 meta["copy"] = cfname
1143 1144 meta["copyrev"] = hex(crev)
1144 1145 fparent1, fparent2 = nullid, newfparent
1145 1146 else:
1146 1147 self.ui.warn(_("warning: can't find ancestor for '%s' "
1147 1148 "copied from '%s'!\n") % (fname, cfname))
1148 1149
1149 1150 elif fparent2 != nullid:
1150 1151 # is one parent an ancestor of the other?
1151 1152 fparentancestor = flog.ancestor(fparent1, fparent2)
1152 1153 if fparentancestor == fparent1:
1153 1154 fparent1, fparent2 = fparent2, nullid
1154 1155 elif fparentancestor == fparent2:
1155 1156 fparent2 = nullid
1156 1157
1157 1158 # is the file changed?
1158 1159 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1159 1160 changelist.append(fname)
1160 1161 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1161 1162
1162 1163 # are just the flags changed during merge?
1163 1164 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1164 1165 changelist.append(fname)
1165 1166
1166 1167 return fparent1
1167 1168
1168 1169 @unfilteredmethod
1169 1170 def commit(self, text="", user=None, date=None, match=None, force=False,
1170 1171 editor=False, extra={}):
1171 1172 """Add a new revision to current repository.
1172 1173
1173 1174 Revision information is gathered from the working directory,
1174 1175 match can be used to filter the committed files. If editor is
1175 1176 supplied, it is called to get a commit message.
1176 1177 """
1177 1178
1178 1179 def fail(f, msg):
1179 1180 raise util.Abort('%s: %s' % (f, msg))
1180 1181
1181 1182 if not match:
1182 1183 match = matchmod.always(self.root, '')
1183 1184
1184 1185 if not force:
1185 1186 vdirs = []
1186 1187 match.dir = vdirs.append
1187 1188 match.bad = fail
1188 1189
1189 1190 wlock = self.wlock()
1190 1191 try:
1191 1192 wctx = self[None]
1192 1193 merge = len(wctx.parents()) > 1
1193 1194
1194 1195 if (not force and merge and match and
1195 1196 (match.files() or match.anypats())):
1196 1197 raise util.Abort(_('cannot partially commit a merge '
1197 1198 '(do not specify files or patterns)'))
1198 1199
1199 1200 changes = self.status(match=match, clean=force)
1200 1201 if force:
1201 1202 changes[0].extend(changes[6]) # mq may commit unchanged files
1202 1203
1203 1204 # check subrepos
1204 1205 subs = []
1205 1206 commitsubs = set()
1206 1207 newstate = wctx.substate.copy()
1207 1208 # only manage subrepos and .hgsubstate if .hgsub is present
1208 1209 if '.hgsub' in wctx:
1209 1210 # we'll decide whether to track this ourselves, thanks
1210 1211 if '.hgsubstate' in changes[0]:
1211 1212 changes[0].remove('.hgsubstate')
1212 1213 if '.hgsubstate' in changes[2]:
1213 1214 changes[2].remove('.hgsubstate')
1214 1215
1215 1216 # compare current state to last committed state
1216 1217 # build new substate based on last committed state
1217 1218 oldstate = wctx.p1().substate
1218 1219 for s in sorted(newstate.keys()):
1219 1220 if not match(s):
1220 1221 # ignore working copy, use old state if present
1221 1222 if s in oldstate:
1222 1223 newstate[s] = oldstate[s]
1223 1224 continue
1224 1225 if not force:
1225 1226 raise util.Abort(
1226 1227 _("commit with new subrepo %s excluded") % s)
1227 1228 if wctx.sub(s).dirty(True):
1228 1229 if not self.ui.configbool('ui', 'commitsubrepos'):
1229 1230 raise util.Abort(
1230 1231 _("uncommitted changes in subrepo %s") % s,
1231 1232 hint=_("use --subrepos for recursive commit"))
1232 1233 subs.append(s)
1233 1234 commitsubs.add(s)
1234 1235 else:
1235 1236 bs = wctx.sub(s).basestate()
1236 1237 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1237 1238 if oldstate.get(s, (None, None, None))[1] != bs:
1238 1239 subs.append(s)
1239 1240
1240 1241 # check for removed subrepos
1241 1242 for p in wctx.parents():
1242 1243 r = [s for s in p.substate if s not in newstate]
1243 1244 subs += [s for s in r if match(s)]
1244 1245 if subs:
1245 1246 if (not match('.hgsub') and
1246 1247 '.hgsub' in (wctx.modified() + wctx.added())):
1247 1248 raise util.Abort(
1248 1249 _("can't commit subrepos without .hgsub"))
1249 1250 changes[0].insert(0, '.hgsubstate')
1250 1251
1251 1252 elif '.hgsub' in changes[2]:
1252 1253 # clean up .hgsubstate when .hgsub is removed
1253 1254 if ('.hgsubstate' in wctx and
1254 1255 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1255 1256 changes[2].insert(0, '.hgsubstate')
1256 1257
1257 1258 # make sure all explicit patterns are matched
1258 1259 if not force and match.files():
1259 1260 matched = set(changes[0] + changes[1] + changes[2])
1260 1261
1261 1262 for f in match.files():
1262 1263 f = self.dirstate.normalize(f)
1263 1264 if f == '.' or f in matched or f in wctx.substate:
1264 1265 continue
1265 1266 if f in changes[3]: # missing
1266 1267 fail(f, _('file not found!'))
1267 1268 if f in vdirs: # visited directory
1268 1269 d = f + '/'
1269 1270 for mf in matched:
1270 1271 if mf.startswith(d):
1271 1272 break
1272 1273 else:
1273 1274 fail(f, _("no match under directory!"))
1274 1275 elif f not in self.dirstate:
1275 1276 fail(f, _("file not tracked!"))
1276 1277
1277 1278 if (not force and not extra.get("close") and not merge
1278 1279 and not (changes[0] or changes[1] or changes[2])
1279 1280 and wctx.branch() == wctx.p1().branch()):
1280 1281 return None
1281 1282
1282 1283 if merge and changes[3]:
1283 1284 raise util.Abort(_("cannot commit merge with missing files"))
1284 1285
1285 1286 ms = mergemod.mergestate(self)
1286 1287 for f in changes[0]:
1287 1288 if f in ms and ms[f] == 'u':
1288 1289 raise util.Abort(_("unresolved merge conflicts "
1289 1290 "(see hg help resolve)"))
1290 1291
1291 1292 cctx = context.workingctx(self, text, user, date, extra, changes)
1292 1293 if editor:
1293 1294 cctx._text = editor(self, cctx, subs)
1294 1295 edited = (text != cctx._text)
1295 1296
1296 1297 # commit subs and write new state
1297 1298 if subs:
1298 1299 for s in sorted(commitsubs):
1299 1300 sub = wctx.sub(s)
1300 1301 self.ui.status(_('committing subrepository %s\n') %
1301 1302 subrepo.subrelpath(sub))
1302 1303 sr = sub.commit(cctx._text, user, date)
1303 1304 newstate[s] = (newstate[s][0], sr)
1304 1305 subrepo.writestate(self, newstate)
1305 1306
1306 1307 # Save commit message in case this transaction gets rolled back
1307 1308 # (e.g. by a pretxncommit hook). Leave the content alone on
1308 1309 # the assumption that the user will use the same editor again.
1309 1310 msgfn = self.savecommitmessage(cctx._text)
1310 1311
1311 1312 p1, p2 = self.dirstate.parents()
1312 1313 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1313 1314 try:
1314 1315 self.hook("precommit", throw=True, parent1=hookp1,
1315 1316 parent2=hookp2)
1316 1317 ret = self.commitctx(cctx, True)
1317 1318 except: # re-raises
1318 1319 if edited:
1319 1320 self.ui.write(
1320 1321 _('note: commit message saved in %s\n') % msgfn)
1321 1322 raise
1322 1323
1323 1324 # update bookmarks, dirstate and mergestate
1324 1325 bookmarks.update(self, [p1, p2], ret)
1325 1326 for f in changes[0] + changes[1]:
1326 1327 self.dirstate.normal(f)
1327 1328 for f in changes[2]:
1328 1329 self.dirstate.drop(f)
1329 1330 self.dirstate.setparents(ret)
1330 1331 ms.reset()
1331 1332 finally:
1332 1333 wlock.release()
1333 1334
1334 1335 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1335 1336 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1336 1337 self._afterlock(commithook)
1337 1338 return ret
1338 1339
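A hedged sketch of driving commit() with a narrowing match, the way commands do; pattern, message, and user are illustrative. A None return means nothing changed:

    from mercurial import ui as uimod, hg
    from mercurial import match as matchmod

    repo = hg.repository(uimod.ui(), '/path/to/repo')     # illustrative path
    m = matchmod.match(repo.root, '', ['glob:src/*.py'])  # illustrative pattern
    node = repo.commit('regenerate sources',
                       user='someone <s@example.com>', match=m)
    if node is None:
        repo.ui.status('nothing changed\n')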
1339 1340 @unfilteredmethod
1340 1341 def commitctx(self, ctx, error=False):
1341 1342 """Add a new revision to current repository.
1342 1343 Revision information is passed via the context argument.
1343 1344 """
1344 1345
1345 1346 tr = lock = None
1346 1347 removed = list(ctx.removed())
1347 1348 p1, p2 = ctx.p1(), ctx.p2()
1348 1349 user = ctx.user()
1349 1350
1350 1351 lock = self.lock()
1351 1352 try:
1352 1353 tr = self.transaction("commit")
1353 1354 trp = weakref.proxy(tr)
1354 1355
1355 1356 if ctx.files():
1356 1357 m1 = p1.manifest().copy()
1357 1358 m2 = p2.manifest()
1358 1359
1359 1360 # check in files
1360 1361 new = {}
1361 1362 changed = []
1362 1363 linkrev = len(self)
1363 1364 for f in sorted(ctx.modified() + ctx.added()):
1364 1365 self.ui.note(f + "\n")
1365 1366 try:
1366 1367 fctx = ctx[f]
1367 1368 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1368 1369 changed)
1369 1370 m1.set(f, fctx.flags())
1370 1371 except OSError, inst:
1371 1372 self.ui.warn(_("trouble committing %s!\n") % f)
1372 1373 raise
1373 1374 except IOError, inst:
1374 1375 errcode = getattr(inst, 'errno', errno.ENOENT)
1375 1376 if error or errcode and errcode != errno.ENOENT:
1376 1377 self.ui.warn(_("trouble committing %s!\n") % f)
1377 1378 raise
1378 1379 else:
1379 1380 removed.append(f)
1380 1381
1381 1382 # update manifest
1382 1383 m1.update(new)
1383 1384 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1384 1385 drop = [f for f in removed if f in m1]
1385 1386 for f in drop:
1386 1387 del m1[f]
1387 1388 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1388 1389 p2.manifestnode(), (new, drop))
1389 1390 files = changed + removed
1390 1391 else:
1391 1392 mn = p1.manifestnode()
1392 1393 files = []
1393 1394
1394 1395 # update changelog
1395 1396 self.changelog.delayupdate()
1396 1397 n = self.changelog.add(mn, files, ctx.description(),
1397 1398 trp, p1.node(), p2.node(),
1398 1399 user, ctx.date(), ctx.extra().copy())
1399 1400 p = lambda: self.changelog.writepending() and self.root or ""
1400 1401 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1401 1402 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1402 1403 parent2=xp2, pending=p)
1403 1404 self.changelog.finalize(trp)
1404 1405 # set the new commit in its proper phase
1405 1406 targetphase = phases.newcommitphase(self.ui)
1406 1407 if targetphase:
1407 1408 # retracting the boundary does not alter parent changesets.
1408 1409 # if a parent has a higher phase, the resulting phase will
1409 1410 # be compliant anyway
1410 1411 #
1411 1412 # if minimal phase was 0 we don't need to retract anything
1412 1413 phases.retractboundary(self, targetphase, [n])
1413 1414 tr.close()
1414 1415 branchmap.updatecache(self)
1415 1416 return n
1416 1417 finally:
1417 1418 if tr:
1418 1419 tr.release()
1419 1420 lock.release()
1420 1421
1421 1422 @unfilteredmethod
1422 1423 def destroyed(self, newheadnodes=None):
1423 1424 '''Inform the repository that nodes have been destroyed.
1424 1425 Intended for use by strip and rollback, so there's a common
1425 1426 place for anything that has to be done after destroying history.
1426 1427
1427 1428 If you know the branchheadcache was up to date before nodes were removed
1428 1429 and you also know the set of candidate new heads that may have resulted
1429 1430 from the destruction, you can set newheadnodes. This will enable the
1430 1431 code to update the branchheads cache, rather than having future code
1431 1432 decide it's invalid and regenerating it from scratch.
1432 1433 '''
1433 1434 # If we have info, newheadnodes, on how to update the branch cache, do
1434 1435 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1435 1436 # will be caught the next time it is read.
1436 1437 if newheadnodes:
1437 1438 ctxgen = (self[node] for node in newheadnodes
1438 1439 if self.changelog.hasnode(node))
1439 1440 cache = self._branchcache
1440 1441 cache.update(self, ctxgen)
1441 1442 cache.write(self)
1442 1443
1443 1444 # Ensure the persistent tag cache is updated. Doing it now
1444 1445 # means that the tag cache only has to worry about destroyed
1445 1446 # heads immediately after a strip/rollback. That in turn
1446 1447 # guarantees that "cachetip == currenttip" (comparing both rev
1447 1448 # and node) always means no nodes have been added or destroyed.
1448 1449
1449 1450 # XXX this is suboptimal when qrefresh'ing: we strip the current
1450 1451 # head, refresh the tag cache, then immediately add a new head.
1451 1452 # But I think doing it this way is necessary for the "instant
1452 1453 # tag cache retrieval" case to work.
1453 1454 self.invalidatecaches()
1454 1455
1455 1456 # Discard all cache entries to force reloading everything.
1456 1457 self._filecache.clear()
1457 1458
1458 1459 def walk(self, match, node=None):
1459 1460 '''
1460 1461 walk recursively through the directory tree or a given
1461 1462 changeset, finding all files matched by the match
1462 1463 function
1463 1464 '''
1464 1465 return self[node].walk(match)
1465 1466
1466 1467 def status(self, node1='.', node2=None, match=None,
1467 1468 ignored=False, clean=False, unknown=False,
1468 1469 listsubrepos=False):
1469 1470 """return status of files between two nodes or node and working
1470 1471 directory.
1471 1472
1472 1473 If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
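
    # Illustrative sketch (not part of the original source): callers
    # typically unpack the 7-tuple built above in this order, e.g.:
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(ignored=True, clean=True, unknown=True)
    #   for f in modified:
    #       repo.ui.write('M %s\n' % f)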

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads
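
    # Illustrative sketch (not part of the original source): heads of the
    # 'default' branch, including closed ones, newest first:
    #
    #   heads = repo.branchheads('default', closed=True)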

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
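
    # Illustrative sketch (not part of the original source): for each
    # (top, bottom) pair, the loop above walks the first-parent chain from
    # top towards bottom and samples the nodes 1, 2, 4, 8, ... steps below
    # top (i is the distance walked so far, f the next power of two). For a
    # linear history whose first-parent chain below top is a, b, c, d, e,
    # f, g, h, ... the collected list for that pair is:
    #
    #   [a, b, d, h, ...]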

    def pull(self, remote, heads=None, force=False):
        # don't open a transaction for nothing or you break future useful
        # rollback calls
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            if obsolete._enabled:
                self.ui.debug('fetching remote obsolete markers\n')
                remoteobs = remote.listkeys('obsolete')
                if 'dump0' in remoteobs:
                    if tr is None:
                        tr = self.transaction(trname)
                    for key in sorted(remoteobs, reverse=True):
                        if key.startswith('dump'):
                            data = base85.b85decode(remoteobs[key])
                            self.obsstore.mergemarkers(tr, data)
                    self.invalidatevolatilesets()
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
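
    # Illustrative sketch (not part of the original source): the phase
    # synchronization above is driven by the 'phases' pushkey namespace.
    # A publishing server typically answers listkeys('phases') with:
    #
    #   {'publishing': 'True'}
    #
    # while a non-publishing one also advertises its draft roots, roughly:
    #
    #   {'publishing': 'False', '<hex node of a draft root>': '1'}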

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if not remote.canpush():
            raise util.Abort(_("destination does not support push"))
        unfi = self.unfiltered()
        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(unfi, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(unfi, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        # an empty obsstore means no obsolete markers, so we
                        # can skip the iteration entirely
                        if unfi.obsstore:
                            # these messages are built here to stay within
                            # the 80-character line limit
                            mso = _("push includes obsolete changeset: %s!")
                            mst = "push includes %s changeset: %s!"
                            # plain versions for i18n tool to detect them
                            _("push includes unstable changeset: %s!")
                            _("push includes bumped changeset: %s!")
                            _("push includes divergent changeset: %s!")
                            # If there is at least one obsolete or unstable
                            # changeset in missing, at least one of the
                            # missing heads will be obsolete or unstable.
                            # So checking heads only is ok
                            for node in outgoing.missingheads:
                                ctx = unfi[node]
                                if ctx.obsolete():
                                    raise util.Abort(mso % ctx)
                                elif ctx.troubled():
                                    raise util.Abort(_(mst)
                                                     % (ctx.troubles()[0],
                                                        ctx))
                        discovery.checkheads(unfi, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # All-out push failed. Synchronize all common.
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = unfi.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public locally.
                    # XXX Beware that the revset breaks if droots is not
                    # XXX strictly a set of roots; we may want to ensure it
                    # XXX is, but that is costly
                    outdated = unfi.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                self.ui.debug('try to push obsolete markers to remote\n')
                if (obsolete._enabled and self.obsstore and
                    'obsolete' in remote.listkeys('namespaces')):
                    rslts = []
                    remotedata = self.listkeys('obsolete')
                    for key in sorted(remotedata, reverse=True):
                        # reverse sort to ensure we end with dump0
                        data = remotedata[key]
                        rslts.append(remote.pushkey('obsolete', key, '', data))
                    if [r for r in rslts if not r]:
                        msg = _('failed to push some obsolete markers!\n')
                        self.ui.warn(msg)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in unfi._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in unfi:
                    cr = unfi[nr]
                    cl = unfi[nl]
                    if bookmarks.validdest(unfi, cr, cl):
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
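
    # Illustrative sketch (not part of the original source): given the
    # return values documented above, a caller could react like this:
    #
    #   ret = repo.push(remote)
    #   if ret is None:
    #       ui.status('nothing to push\n')
    #   elif ret == 0:
    #       ui.warn('push failed (HTTP error)\n')
    #   else:
    #       ui.status('push completed\n')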

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = cl.ancestors([cl.rev(n) for n in bases])
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            hasnode = cl.hasnode
            common = [n for n in common if hasnode(n)]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))
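
    # Illustrative sketch (not part of the original source): with both
    # defaults, getbundle bundles the whole repository; an incremental
    # bundle between two locally known nodes could be requested as:
    #
    #   cg = repo.getbundle('pull', heads=[newtip], common=[oldtip])
    #
    # i.e. the changesets that are ancestors of newtip but not of oldtip.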

    @unfilteredmethod
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
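
    # Illustrative sketch (not part of the original source): the stream
    # produced by gengroup() is one "bundle10" changegroup laid out as
    #
    #   changelog chunks | manifest chunks | per-file groups | close()
    #
    # where each per-file group is fileheader(fname) followed by that
    # filelog's chunks, and lookup() maps each revlog node back to the
    # changelog node that introduced it.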

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    @unfilteredmethod
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    @unfilteredmethod
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            self.invalidatevolatilesets()

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                if srctype != 'strip':
                    # During strip, branchcache is invalid but the coming
                    # call to `destroyed` will repair it.
                    # In other cases we can safely update the cache on disk.
                    branchmap.updatecache(self)
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
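
    # Illustrative sketch (not part of the original source): the +1/-1
    # shift above keeps 0 free to mean "nothing changed", e.g.:
    #
    #   head count unchanged (dh == 0)  -> returns  1
    #   two heads added      (dh == 2)  -> returns  3
    #   one head removed     (dh == -1) -> returns -2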

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    self._branchcache = cache
                    cache.write(self)
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
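
    # Illustrative sketch (not part of the original source): as implied by
    # the parsing above, the stream_out response is laid out as
    #
    #   <resp>\n                      # 0 = ok, 1 = forbidden, 2 = lock failed
    #   <total_files> <total_bytes>\n
    #   then, for each file:
    #   <store path>\0<size>\n        # followed by exactly <size> raw bytes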

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
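
    # Illustrative sketch (not part of the original source): the
    # negotiation above reduces to:
    #
    #   stream wanted (or server prefers it) and no heads given?
    #     server has 'stream'             -> stream_in with {'revlogv1'}
    #     server has 'streamreqs=<fmts>' and we support all listed formats
    #                                     -> stream_in with those formats
    #   otherwise                         -> fall back to a normal pull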

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True