localrepo: use "vfs.rename()" instead of "util.rename()"...
FUJIWARA Katsunori
r18952:8086b530 default
@@ -1,2594 +1,2596
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if a repo has an unfilteredpropertycache value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
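A minimal sketch of how the decorator above is meant to be used; examplerepo and countallrevs are hypothetical and not part of this file:

    # Hypothetical usage of @unfilteredmethod (illustration only):
    # the wrapper substitutes repo.unfiltered(), so len(self) also
    # counts changesets hidden by the current filter.
    class examplerepo(localrepository):
        @unfilteredmethod
        def countallrevs(self):
            return len(self)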
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo.filtered('served')
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return self._repo.branchmap()
95 95
96 96 def heads(self):
97 97 return self._repo.heads()
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150 filtername = None
151 151
152 152 def _baserequirements(self, create):
153 153 return self.requirements[:]
154 154
155 155 def __init__(self, baseui, path=None, create=False):
156 156 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
157 157 self.wopener = self.wvfs
158 158 self.root = self.wvfs.base
159 159 self.path = self.wvfs.join(".hg")
160 160 self.origroot = path
161 161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
162 162 self.vfs = scmutil.vfs(self.path)
163 163 self.opener = self.vfs
164 164 self.baseui = baseui
165 165 self.ui = baseui.copy()
166 166 # A list of callbacks to shape the phases if no data were found.
167 167 # Callbacks are in the form: func(repo, roots) --> processed root.
168 168 # This list is to be filled by extensions during repo setup.
169 169 self._phasedefaults = []
170 170 try:
171 171 self.ui.readconfig(self.join("hgrc"), self.root)
172 172 extensions.loadall(self.ui)
173 173 except IOError:
174 174 pass
175 175
176 176 if not self.vfs.isdir():
177 177 if create:
178 178 if not self.wvfs.exists():
179 179 self.wvfs.makedirs()
180 180 self.vfs.makedir(notindexed=True)
181 181 requirements = self._baserequirements(create)
182 182 if self.ui.configbool('format', 'usestore', True):
183 183 self.vfs.mkdir("store")
184 184 requirements.append("store")
185 185 if self.ui.configbool('format', 'usefncache', True):
186 186 requirements.append("fncache")
187 187 if self.ui.configbool('format', 'dotencode', True):
188 188 requirements.append('dotencode')
189 189 # create an invalid changelog
190 190 self.vfs.append(
191 191 "00changelog.i",
192 192 '\0\0\0\2' # represents revlogv2
193 193 ' dummy changelog to prevent using the old repo layout'
194 194 )
195 195 if self.ui.configbool('format', 'generaldelta', False):
196 196 requirements.append("generaldelta")
197 197 requirements = set(requirements)
198 198 else:
199 199 raise error.RepoError(_("repository %s not found") % path)
200 200 elif create:
201 201 raise error.RepoError(_("repository %s already exists") % path)
202 202 else:
203 203 try:
204 204 requirements = scmutil.readrequires(self.vfs, self.supported)
205 205 except IOError, inst:
206 206 if inst.errno != errno.ENOENT:
207 207 raise
208 208 requirements = set()
209 209
210 210 self.sharedpath = self.path
211 211 try:
212 212 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
213 213 realpath=True)
214 214 s = vfs.base
215 215 if not vfs.exists():
216 216 raise error.RepoError(
217 217 _('.hg/sharedpath points to nonexistent directory %s') % s)
218 218 self.sharedpath = s
219 219 except IOError, inst:
220 220 if inst.errno != errno.ENOENT:
221 221 raise
222 222
223 223 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
224 224 self.spath = self.store.path
225 225 self.svfs = self.store.vfs
226 226 self.sopener = self.svfs
227 227 self.sjoin = self.store.join
228 228 self.vfs.createmode = self.store.createmode
229 229 self._applyrequirements(requirements)
230 230 if create:
231 231 self._writerequirements()
232 232
233 233
234 234 self._branchcaches = {}
235 235 self.filterpats = {}
236 236 self._datafilters = {}
237 237 self._transref = self._lockref = self._wlockref = None
238 238
239 239 # A cache for various files under .hg/ that tracks file changes,
240 240 # (used by the filecache decorator)
241 241 #
242 242 # Maps a property name to its util.filecacheentry
243 243 self._filecache = {}
244 244
245 245 # holds sets of revisions to be filtered
246 246 # should be cleared when something might have changed the filter value:
247 247 # - new changesets,
248 248 # - phase change,
249 249 # - new obsolescence marker,
250 250 # - working directory parent change,
251 251 # - bookmark changes
252 252 self.filteredrevcache = {}
253 253
254 254 def close(self):
255 255 pass
256 256
257 257 def _restrictcapabilities(self, caps):
258 258 return caps
259 259
260 260 def _applyrequirements(self, requirements):
261 261 self.requirements = requirements
262 262 self.sopener.options = dict((r, 1) for r in requirements
263 263 if r in self.openerreqs)
264 264
265 265 def _writerequirements(self):
266 266 reqfile = self.opener("requires", "w")
267 267 for r in sorted(self.requirements):
268 268 reqfile.write("%s\n" % r)
269 269 reqfile.close()
270 270
271 271 def _checknested(self, path):
272 272 """Determine if path is a legal nested repository."""
273 273 if not path.startswith(self.root):
274 274 return False
275 275 subpath = path[len(self.root) + 1:]
276 276 normsubpath = util.pconvert(subpath)
277 277
278 278 # XXX: Checking against the current working copy is wrong in
279 279 # the sense that it can reject things like
280 280 #
281 281 # $ hg cat -r 10 sub/x.txt
282 282 #
283 283 # if sub/ is no longer a subrepository in the working copy
284 284 # parent revision.
285 285 #
286 286 # However, it can of course also allow things that would have
287 287 # been rejected before, such as the above cat command if sub/
288 288 # is a subrepository now, but was a normal directory before.
289 289 # The old path auditor would have rejected by mistake since it
290 290 # panics when it sees sub/.hg/.
291 291 #
292 292 # All in all, checking against the working copy seems sensible
293 293 # since we want to prevent access to nested repositories on
294 294 # the filesystem *now*.
295 295 ctx = self[None]
296 296 parts = util.splitpath(subpath)
297 297 while parts:
298 298 prefix = '/'.join(parts)
299 299 if prefix in ctx.substate:
300 300 if prefix == normsubpath:
301 301 return True
302 302 else:
303 303 sub = ctx.sub(prefix)
304 304 return sub.checknested(subpath[len(prefix) + 1:])
305 305 else:
306 306 parts.pop()
307 307 return False
308 308
309 309 def peer(self):
310 310 return localpeer(self) # not cached to avoid reference cycle
311 311
312 312 def unfiltered(self):
313 313 """Return unfiltered version of the repository
314 314
315 315 Intended to be overwritten by filtered repo."""
316 316 return self
317 317
318 318 def filtered(self, name):
319 319 """Return a filtered version of a repository"""
320 320 # build a new class with the mixin and the current class
321 321 # (possibly subclass of the repo)
322 322 class proxycls(repoview.repoview, self.unfiltered().__class__):
323 323 pass
324 324 return proxycls(self, name)
325 325
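As a usage sketch (assuming an open localrepository `repo`), the proxy class above lets callers view one repository through a named filter, as localpeer does with 'served':

    # Sketch (assumes an open localrepository `repo`):
    served = repo.filtered('served')      # hides filtered changesets
    assert served.unfiltered() is repo.unfiltered()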
326 326 @repofilecache('bookmarks')
327 327 def _bookmarks(self):
328 328 return bookmarks.bmstore(self)
329 329
330 330 @repofilecache('bookmarks.current')
331 331 def _bookmarkcurrent(self):
332 332 return bookmarks.readcurrent(self)
333 333
334 334 def bookmarkheads(self, bookmark):
335 335 name = bookmark.split('@', 1)[0]
336 336 heads = []
337 337 for mark, n in self._bookmarks.iteritems():
338 338 if mark.split('@', 1)[0] == name:
339 339 heads.append(n)
340 340 return heads
341 341
342 342 @storecache('phaseroots')
343 343 def _phasecache(self):
344 344 return phases.phasecache(self, self._phasedefaults)
345 345
346 346 @storecache('obsstore')
347 347 def obsstore(self):
348 348 store = obsolete.obsstore(self.sopener)
349 349 if store and not obsolete._enabled:
350 350 # message is rare enough to not be translated
351 351 msg = 'obsolete feature not enabled but %i markers found!\n'
352 352 self.ui.warn(msg % len(list(store)))
353 353 return store
354 354
355 355 @storecache('00changelog.i')
356 356 def changelog(self):
357 357 c = changelog.changelog(self.sopener)
358 358 if 'HG_PENDING' in os.environ:
359 359 p = os.environ['HG_PENDING']
360 360 if p.startswith(self.root):
361 361 c.readpending('00changelog.i.a')
362 362 return c
363 363
364 364 @storecache('00manifest.i')
365 365 def manifest(self):
366 366 return manifest.manifest(self.sopener)
367 367
368 368 @repofilecache('dirstate')
369 369 def dirstate(self):
370 370 warned = [0]
371 371 def validate(node):
372 372 try:
373 373 self.changelog.rev(node)
374 374 return node
375 375 except error.LookupError:
376 376 if not warned[0]:
377 377 warned[0] = True
378 378 self.ui.warn(_("warning: ignoring unknown"
379 379 " working parent %s!\n") % short(node))
380 380 return nullid
381 381
382 382 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
383 383
384 384 def __getitem__(self, changeid):
385 385 if changeid is None:
386 386 return context.workingctx(self)
387 387 return context.changectx(self, changeid)
388 388
389 389 def __contains__(self, changeid):
390 390 try:
391 391 return bool(self.lookup(changeid))
392 392 except error.RepoLookupError:
393 393 return False
394 394
395 395 def __nonzero__(self):
396 396 return True
397 397
398 398 def __len__(self):
399 399 return len(self.changelog)
400 400
401 401 def __iter__(self):
402 402 return iter(self.changelog)
403 403
404 404 def revs(self, expr, *args):
405 405 '''Return a list of revisions matching the given revset'''
406 406 expr = revset.formatspec(expr, *args)
407 407 m = revset.match(None, expr)
408 408 return [r for r in m(self, list(self))]
409 409
410 410 def set(self, expr, *args):
411 411 '''
412 412 Yield a context for each matching revision, after doing arg
413 413 replacement via revset.formatspec
414 414 '''
415 415 for r in self.revs(expr, *args):
416 416 yield self[r]
417 417
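A brief sketch of the two query helpers above (assuming an open localrepository `repo`); '%s' is a revset.formatspec placeholder for a string argument:

    # Sketch (assumes an open localrepository `repo`):
    revnums = repo.revs('heads(branch(%s))', 'default')   # revision ints
    for ctx in repo.set('heads(branch(%s))', 'default'):  # changectx objects
        repo.ui.write('%s\n' % ctx.hex())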
418 418 def url(self):
419 419 return 'file:' + self.root
420 420
421 421 def hook(self, name, throw=False, **args):
422 422 return hook.hook(self.ui, self, name, throw, **args)
423 423
424 424 @unfilteredmethod
425 425 def _tag(self, names, node, message, local, user, date, extra={}):
426 426 if isinstance(names, str):
427 427 names = (names,)
428 428
429 429 branches = self.branchmap()
430 430 for name in names:
431 431 self.hook('pretag', throw=True, node=hex(node), tag=name,
432 432 local=local)
433 433 if name in branches:
434 434 self.ui.warn(_("warning: tag %s conflicts with existing"
435 435 " branch name\n") % name)
436 436
437 437 def writetags(fp, names, munge, prevtags):
438 438 fp.seek(0, 2)
439 439 if prevtags and prevtags[-1] != '\n':
440 440 fp.write('\n')
441 441 for name in names:
442 442 m = munge and munge(name) or name
443 443 if (self._tagscache.tagtypes and
444 444 name in self._tagscache.tagtypes):
445 445 old = self.tags().get(name, nullid)
446 446 fp.write('%s %s\n' % (hex(old), m))
447 447 fp.write('%s %s\n' % (hex(node), m))
448 448 fp.close()
449 449
450 450 prevtags = ''
451 451 if local:
452 452 try:
453 453 fp = self.opener('localtags', 'r+')
454 454 except IOError:
455 455 fp = self.opener('localtags', 'a')
456 456 else:
457 457 prevtags = fp.read()
458 458
459 459 # local tags are stored in the current charset
460 460 writetags(fp, names, None, prevtags)
461 461 for name in names:
462 462 self.hook('tag', node=hex(node), tag=name, local=local)
463 463 return
464 464
465 465 try:
466 466 fp = self.wfile('.hgtags', 'rb+')
467 467 except IOError, e:
468 468 if e.errno != errno.ENOENT:
469 469 raise
470 470 fp = self.wfile('.hgtags', 'ab')
471 471 else:
472 472 prevtags = fp.read()
473 473
474 474 # committed tags are stored in UTF-8
475 475 writetags(fp, names, encoding.fromlocal, prevtags)
476 476
477 477 fp.close()
478 478
479 479 self.invalidatecaches()
480 480
481 481 if '.hgtags' not in self.dirstate:
482 482 self[None].add(['.hgtags'])
483 483
484 484 m = matchmod.exact(self.root, '', ['.hgtags'])
485 485 tagnode = self.commit(message, user, date, extra=extra, match=m)
486 486
487 487 for name in names:
488 488 self.hook('tag', node=hex(node), tag=name, local=local)
489 489
490 490 return tagnode
491 491
492 492 def tag(self, names, node, message, local, user, date):
493 493 '''tag a revision with one or more symbolic names.
494 494
495 495 names is a list of strings or, when adding a single tag, names may be a
496 496 string.
497 497
498 498 if local is True, the tags are stored in a per-repository file.
499 499 otherwise, they are stored in the .hgtags file, and a new
500 500 changeset is committed with the change.
501 501
502 502 keyword arguments:
503 503
504 504 local: whether to store tags in non-version-controlled file
505 505 (default False)
506 506
507 507 message: commit message to use if committing
508 508
509 509 user: name of user to use if committing
510 510
511 511 date: date tuple to use if committing'''
512 512
513 513 if not local:
514 514 for x in self.status()[:5]:
515 515 if '.hgtags' in x:
516 516 raise util.Abort(_('working copy of .hgtags is changed '
517 517 '(please commit .hgtags manually)'))
518 518
519 519 self.tags() # instantiate the cache
520 520 self._tag(names, node, message, local, user, date)
521 521
522 522 @filteredpropertycache
523 523 def _tagscache(self):
524 524 '''Returns a tagscache object that contains various tags related
525 525 caches.'''
526 526
527 527 # This simplifies its cache management by having one decorated
528 528 # function (this one) and the rest simply fetch things from it.
529 529 class tagscache(object):
530 530 def __init__(self):
531 531 # These two define the set of tags for this repository. tags
532 532 # maps tag name to node; tagtypes maps tag name to 'global' or
533 533 # 'local'. (Global tags are defined by .hgtags across all
534 534 # heads, and local tags are defined in .hg/localtags.)
535 535 # They constitute the in-memory cache of tags.
536 536 self.tags = self.tagtypes = None
537 537
538 538 self.nodetagscache = self.tagslist = None
539 539
540 540 cache = tagscache()
541 541 cache.tags, cache.tagtypes = self._findtags()
542 542
543 543 return cache
544 544
545 545 def tags(self):
546 546 '''return a mapping of tag to node'''
547 547 t = {}
548 548 if self.changelog.filteredrevs:
549 549 tags, tt = self._findtags()
550 550 else:
551 551 tags = self._tagscache.tags
552 552 for k, v in tags.iteritems():
553 553 try:
554 554 # ignore tags to unknown nodes
555 555 self.changelog.rev(v)
556 556 t[k] = v
557 557 except (error.LookupError, ValueError):
558 558 pass
559 559 return t
560 560
561 561 def _findtags(self):
562 562 '''Do the hard work of finding tags. Return a pair of dicts
563 563 (tags, tagtypes) where tags maps tag name to node, and tagtypes
564 564 maps tag name to a string like \'global\' or \'local\'.
565 565 Subclasses or extensions are free to add their own tags, but
566 566 should be aware that the returned dicts will be retained for the
567 567 duration of the localrepo object.'''
568 568
569 569 # XXX what tagtype should subclasses/extensions use? Currently
570 570 # mq and bookmarks add tags, but do not set the tagtype at all.
571 571 # Should each extension invent its own tag type? Should there
572 572 # be one tagtype for all such "virtual" tags? Or is the status
573 573 # quo fine?
574 574
575 575 alltags = {} # map tag name to (node, hist)
576 576 tagtypes = {}
577 577
578 578 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
579 579 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
580 580
581 581 # Build the return dicts. Have to re-encode tag names because
582 582 # the tags module always uses UTF-8 (in order not to lose info
583 583 # writing to the cache), but the rest of Mercurial wants them in
584 584 # local encoding.
585 585 tags = {}
586 586 for (name, (node, hist)) in alltags.iteritems():
587 587 if node != nullid:
588 588 tags[encoding.tolocal(name)] = node
589 589 tags['tip'] = self.changelog.tip()
590 590 tagtypes = dict([(encoding.tolocal(name), value)
591 591 for (name, value) in tagtypes.iteritems()])
592 592 return (tags, tagtypes)
593 593
594 594 def tagtype(self, tagname):
595 595 '''
596 596 return the type of the given tag. result can be:
597 597
598 598 'local' : a local tag
599 599 'global' : a global tag
600 600 None : tag does not exist
601 601 '''
602 602
603 603 return self._tagscache.tagtypes.get(tagname)
604 604
605 605 def tagslist(self):
606 606 '''return a list of tags ordered by revision'''
607 607 if not self._tagscache.tagslist:
608 608 l = []
609 609 for t, n in self.tags().iteritems():
610 610 r = self.changelog.rev(n)
611 611 l.append((r, t, n))
612 612 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
613 613
614 614 return self._tagscache.tagslist
615 615
616 616 def nodetags(self, node):
617 617 '''return the tags associated with a node'''
618 618 if not self._tagscache.nodetagscache:
619 619 nodetagscache = {}
620 620 for t, n in self._tagscache.tags.iteritems():
621 621 nodetagscache.setdefault(n, []).append(t)
622 622 for tags in nodetagscache.itervalues():
623 623 tags.sort()
624 624 self._tagscache.nodetagscache = nodetagscache
625 625 return self._tagscache.nodetagscache.get(node, [])
626 626
627 627 def nodebookmarks(self, node):
628 628 marks = []
629 629 for bookmark, n in self._bookmarks.iteritems():
630 630 if n == node:
631 631 marks.append(bookmark)
632 632 return sorted(marks)
633 633
634 634 def branchmap(self):
635 635 '''returns a dictionary {branch: [branchheads]}'''
636 636 branchmap.updatecache(self)
637 637 return self._branchcaches[self.filtername]
638 638
639 639
640 640 def _branchtip(self, heads):
641 641 '''return the tipmost branch head in heads'''
642 642 tip = heads[-1]
643 643 for h in reversed(heads):
644 644 if not self[h].closesbranch():
645 645 tip = h
646 646 break
647 647 return tip
648 648
649 649 def branchtip(self, branch):
650 650 '''return the tip node for a given branch'''
651 651 if branch not in self.branchmap():
652 652 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
653 653 return self._branchtip(self.branchmap()[branch])
654 654
655 655 def branchtags(self):
656 656 '''return a dict where branch names map to the tipmost head of
657 657 the branch; open heads come before closed'''
658 658 bt = {}
659 659 for bn, heads in self.branchmap().iteritems():
660 660 bt[bn] = self._branchtip(heads)
661 661 return bt
662 662
663 663 def lookup(self, key):
664 664 return self[key].node()
665 665
666 666 def lookupbranch(self, key, remote=None):
667 667 repo = remote or self
668 668 if key in repo.branchmap():
669 669 return key
670 670
671 671 repo = (remote and remote.local()) and remote or self
672 672 return repo[key].branch()
673 673
674 674 def known(self, nodes):
675 675 nm = self.changelog.nodemap
676 676 pc = self._phasecache
677 677 result = []
678 678 for n in nodes:
679 679 r = nm.get(n)
680 680 resp = not (r is None or pc.phase(self, r) >= phases.secret)
681 681 result.append(resp)
682 682 return result
683 683
684 684 def local(self):
685 685 return self
686 686
687 687 def cancopy(self):
688 688 return self.local() # so statichttprepo's override of local() works
689 689
690 690 def join(self, f):
691 691 return os.path.join(self.path, f)
692 692
693 693 def wjoin(self, f):
694 694 return os.path.join(self.root, f)
695 695
696 696 def file(self, f):
697 697 if f[0] == '/':
698 698 f = f[1:]
699 699 return filelog.filelog(self.sopener, f)
700 700
701 701 def changectx(self, changeid):
702 702 return self[changeid]
703 703
704 704 def parents(self, changeid=None):
705 705 '''get list of changectxs for parents of changeid'''
706 706 return self[changeid].parents()
707 707
708 708 def setparents(self, p1, p2=nullid):
709 709 copies = self.dirstate.setparents(p1, p2)
710 710 pctx = self[p1]
711 711 if copies:
712 712 # Adjust copy records, the dirstate cannot do it, it
713 713 # requires access to parents manifests. Preserve them
714 714 # only for entries added to first parent.
715 715 for f in copies:
716 716 if f not in pctx and copies[f] in pctx:
717 717 self.dirstate.copy(copies[f], f)
718 718 if p2 == nullid:
719 719 for f, s in sorted(self.dirstate.copies().items()):
720 720 if f not in pctx and s not in pctx:
721 721 self.dirstate.copy(None, f)
722 722
723 723 def filectx(self, path, changeid=None, fileid=None):
724 724 """changeid can be a changeset revision, node, or tag.
725 725 fileid can be a file revision or node."""
726 726 return context.filectx(self, path, changeid, fileid)
727 727
728 728 def getcwd(self):
729 729 return self.dirstate.getcwd()
730 730
731 731 def pathto(self, f, cwd=None):
732 732 return self.dirstate.pathto(f, cwd)
733 733
734 734 def wfile(self, f, mode='r'):
735 735 return self.wopener(f, mode)
736 736
737 737 def _link(self, f):
738 738 return self.wvfs.islink(f)
739 739
740 740 def _loadfilter(self, filter):
741 741 if filter not in self.filterpats:
742 742 l = []
743 743 for pat, cmd in self.ui.configitems(filter):
744 744 if cmd == '!':
745 745 continue
746 746 mf = matchmod.match(self.root, '', [pat])
747 747 fn = None
748 748 params = cmd
749 749 for name, filterfn in self._datafilters.iteritems():
750 750 if cmd.startswith(name):
751 751 fn = filterfn
752 752 params = cmd[len(name):].lstrip()
753 753 break
754 754 if not fn:
755 755 fn = lambda s, c, **kwargs: util.filter(s, c)
756 756 # Wrap old filters not supporting keyword arguments
757 757 if not inspect.getargspec(fn)[2]:
758 758 oldfn = fn
759 759 fn = lambda s, c, **kwargs: oldfn(s, c)
760 760 l.append((mf, fn, params))
761 761 self.filterpats[filter] = l
762 762 return self.filterpats[filter]
763 763
764 764 def _filter(self, filterpats, filename, data):
765 765 for mf, fn, cmd in filterpats:
766 766 if mf(filename):
767 767 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
768 768 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
769 769 break
770 770
771 771 return data
772 772
773 773 @unfilteredpropertycache
774 774 def _encodefilterpats(self):
775 775 return self._loadfilter('encode')
776 776
777 777 @unfilteredpropertycache
778 778 def _decodefilterpats(self):
779 779 return self._loadfilter('decode')
780 780
781 781 def adddatafilter(self, name, filter):
782 782 self._datafilters[name] = filter
783 783
784 784 def wread(self, filename):
785 785 if self._link(filename):
786 786 data = self.wvfs.readlink(filename)
787 787 else:
788 788 data = self.wopener.read(filename)
789 789 return self._filter(self._encodefilterpats, filename, data)
790 790
791 791 def wwrite(self, filename, data, flags):
792 792 data = self._filter(self._decodefilterpats, filename, data)
793 793 if 'l' in flags:
794 794 self.wopener.symlink(data, filename)
795 795 else:
796 796 self.wopener.write(filename, data)
797 797 if 'x' in flags:
798 798 self.wvfs.setflags(filename, False, True)
799 799
800 800 def wwritedata(self, filename, data):
801 801 return self._filter(self._decodefilterpats, filename, data)
802 802
803 803 def transaction(self, desc):
804 804 tr = self._transref and self._transref() or None
805 805 if tr and tr.running():
806 806 return tr.nest()
807 807
808 808 # abort here if the journal already exists
809 809 if self.svfs.exists("journal"):
810 810 raise error.RepoError(
811 811 _("abandoned transaction found - run hg recover"))
812 812
813 813 self._writejournal(desc)
814 renames = [(x, undoname(x)) for x in self._journalfiles()]
814 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
815 815
816 816 tr = transaction.transaction(self.ui.warn, self.sopener,
817 817 self.sjoin("journal"),
818 818 aftertrans(renames),
819 819 self.store.createmode)
820 820 self._transref = weakref.ref(tr)
821 821 return tr
822 822
823 823 def _journalfiles(self):
824 return (self.sjoin('journal'), self.join('journal.dirstate'),
825 self.join('journal.branch'), self.join('journal.desc'),
826 self.join('journal.bookmarks'),
827 self.sjoin('journal.phaseroots'))
824 return ((self.svfs, 'journal'),
825 (self.vfs, 'journal.dirstate'),
826 (self.vfs, 'journal.branch'),
827 (self.vfs, 'journal.desc'),
828 (self.vfs, 'journal.bookmarks'),
829 (self.svfs, 'journal.phaseroots'))
828 830
829 831 def undofiles(self):
830 return [undoname(x) for x in self._journalfiles()]
832 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
831 833
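The hunks above are the core of this changeset: _journalfiles() now yields (vfs, filename) pairs, so the rename list built in transaction() carries the vfs each file should be renamed through. A minimal sketch of an aftertrans() matching that convention; the real function is defined further down this file and may differ in detail:

    # Sketch of the (vfs, src, dest) rename convention used above;
    # the actual aftertrans() lives later in localrepo.py.
    def aftertrans(files):
        renamefiles = [tuple(t) for t in files]
        def a():
            for vfs, src, dest in renamefiles:
                vfs.rename(src, dest)  # replaces util.rename() on joined paths
        return a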
832 834 def _writejournal(self, desc):
833 835 self.opener.write("journal.dirstate",
834 836 self.opener.tryread("dirstate"))
835 837 self.opener.write("journal.branch",
836 838 encoding.fromlocal(self.dirstate.branch()))
837 839 self.opener.write("journal.desc",
838 840 "%d\n%s\n" % (len(self), desc))
839 841 self.opener.write("journal.bookmarks",
840 842 self.opener.tryread("bookmarks"))
841 843 self.sopener.write("journal.phaseroots",
842 844 self.sopener.tryread("phaseroots"))
843 845
844 846 def recover(self):
845 847 lock = self.lock()
846 848 try:
847 849 if self.svfs.exists("journal"):
848 850 self.ui.status(_("rolling back interrupted transaction\n"))
849 851 transaction.rollback(self.sopener, self.sjoin("journal"),
850 852 self.ui.warn)
851 853 self.invalidate()
852 854 return True
853 855 else:
854 856 self.ui.warn(_("no interrupted transaction available\n"))
855 857 return False
856 858 finally:
857 859 lock.release()
858 860
859 861 def rollback(self, dryrun=False, force=False):
860 862 wlock = lock = None
861 863 try:
862 864 wlock = self.wlock()
863 865 lock = self.lock()
864 866 if self.svfs.exists("undo"):
865 867 return self._rollback(dryrun, force)
866 868 else:
867 869 self.ui.warn(_("no rollback information available\n"))
868 870 return 1
869 871 finally:
870 872 release(lock, wlock)
871 873
872 874 @unfilteredmethod # Until we get smarter cache management
873 875 def _rollback(self, dryrun, force):
874 876 ui = self.ui
875 877 try:
876 878 args = self.opener.read('undo.desc').splitlines()
877 879 (oldlen, desc, detail) = (int(args[0]), args[1], None)
878 880 if len(args) >= 3:
879 881 detail = args[2]
880 882 oldtip = oldlen - 1
881 883
882 884 if detail and ui.verbose:
883 885 msg = (_('repository tip rolled back to revision %s'
884 886 ' (undo %s: %s)\n')
885 887 % (oldtip, desc, detail))
886 888 else:
887 889 msg = (_('repository tip rolled back to revision %s'
888 890 ' (undo %s)\n')
889 891 % (oldtip, desc))
890 892 except IOError:
891 893 msg = _('rolling back unknown transaction\n')
892 894 desc = None
893 895
894 896 if not force and self['.'] != self['tip'] and desc == 'commit':
895 897 raise util.Abort(
896 898 _('rollback of last commit while not checked out '
897 899 'may lose data'), hint=_('use -f to force'))
898 900
899 901 ui.status(msg)
900 902 if dryrun:
901 903 return 0
902 904
903 905 parents = self.dirstate.parents()
904 906 self.destroying()
905 907 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
906 908 if self.vfs.exists('undo.bookmarks'):
907 909 self.vfs.rename('undo.bookmarks', 'bookmarks')
908 910 if self.svfs.exists('undo.phaseroots'):
909 911 self.svfs.rename('undo.phaseroots', 'phaseroots')
910 912 self.invalidate()
911 913
912 914 parentgone = (parents[0] not in self.changelog.nodemap or
913 915 parents[1] not in self.changelog.nodemap)
914 916 if parentgone:
915 917 self.vfs.rename('undo.dirstate', 'dirstate')
916 918 try:
917 919 branch = self.opener.read('undo.branch')
918 920 self.dirstate.setbranch(encoding.tolocal(branch))
919 921 except IOError:
920 922 ui.warn(_('named branch could not be reset: '
921 923 'current branch is still \'%s\'\n')
922 924 % self.dirstate.branch())
923 925
924 926 self.dirstate.invalidate()
925 927 parents = tuple([p.rev() for p in self.parents()])
926 928 if len(parents) > 1:
927 929 ui.status(_('working directory now based on '
928 930 'revisions %d and %d\n') % parents)
929 931 else:
930 932 ui.status(_('working directory now based on '
931 933 'revision %d\n') % parents)
932 934 # TODO: if we know which new heads may result from this rollback, pass
933 935 # them to destroy(), which will prevent the branchhead cache from being
934 936 # invalidated.
935 937 self.destroyed()
936 938 return 0
937 939
938 940 def invalidatecaches(self):
939 941
940 942 if '_tagscache' in vars(self):
941 943 # can't use delattr on proxy
942 944 del self.__dict__['_tagscache']
943 945
944 946 self.unfiltered()._branchcaches.clear()
945 947 self.invalidatevolatilesets()
946 948
947 949 def invalidatevolatilesets(self):
948 950 self.filteredrevcache.clear()
949 951 obsolete.clearobscaches(self)
950 952
951 953 def invalidatedirstate(self):
952 954 '''Invalidates the dirstate, causing the next call to dirstate
953 955 to check if it was modified since the last time it was read,
954 956 rereading it if it has.
955 957
956 958 This differs from dirstate.invalidate() in that it doesn't always
957 959 reread the dirstate. Use dirstate.invalidate() if you want to
958 960 explicitly reread the dirstate (i.e. restore it to a previously
959 961 known good state).'''
960 962 if hasunfilteredcache(self, 'dirstate'):
961 963 for k in self.dirstate._filecache:
962 964 try:
963 965 delattr(self.dirstate, k)
964 966 except AttributeError:
965 967 pass
966 968 delattr(self.unfiltered(), 'dirstate')
967 969
968 970 def invalidate(self):
969 971 unfiltered = self.unfiltered() # all file caches are stored unfiltered
970 972 for k in self._filecache:
971 973 # dirstate is invalidated separately in invalidatedirstate()
972 974 if k == 'dirstate':
973 975 continue
974 976
975 977 try:
976 978 delattr(unfiltered, k)
977 979 except AttributeError:
978 980 pass
979 981 self.invalidatecaches()
980 982
981 983 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
982 984 try:
983 985 l = lock.lock(lockname, 0, releasefn, desc=desc)
984 986 except error.LockHeld, inst:
985 987 if not wait:
986 988 raise
987 989 self.ui.warn(_("waiting for lock on %s held by %r\n") %
988 990 (desc, inst.locker))
989 991 # default to 600 seconds timeout
990 992 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
991 993 releasefn, desc=desc)
992 994 if acquirefn:
993 995 acquirefn()
994 996 return l
995 997
996 998 def _afterlock(self, callback):
997 999 """add a callback to the current repository lock.
998 1000
999 1001 The callback will be executed on lock release."""
1000 1002 l = self._lockref and self._lockref()
1001 1003 if l:
1002 1004 l.postrelease.append(callback)
1003 1005 else:
1004 1006 callback()
1005 1007
1006 1008 def lock(self, wait=True):
1007 1009 '''Lock the repository store (.hg/store) and return a weak reference
1008 1010 to the lock. Use this before modifying the store (e.g. committing or
1009 1011 stripping). If you are opening a transaction, get a lock as well.'''
1010 1012 l = self._lockref and self._lockref()
1011 1013 if l is not None and l.held:
1012 1014 l.lock()
1013 1015 return l
1014 1016
1015 1017 def unlock():
1016 1018 self.store.write()
1017 1019 if hasunfilteredcache(self, '_phasecache'):
1018 1020 self._phasecache.write()
1019 1021 for k, ce in self._filecache.items():
1020 1022 if k == 'dirstate' or k not in self.__dict__:
1021 1023 continue
1022 1024 ce.refresh()
1023 1025
1024 1026 l = self._lock(self.sjoin("lock"), wait, unlock,
1025 1027 self.invalidate, _('repository %s') % self.origroot)
1026 1028 self._lockref = weakref.ref(l)
1027 1029 return l
1028 1030
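The docstring above prescribes the locking discipline used throughout this file (see commitctx below): take the store lock first, then open a transaction inside it. A sketch, assuming an open localrepository `repo`:

    # Sketch (assumes an open localrepository `repo`):
    lock = repo.lock()
    try:
        tr = repo.transaction('example')
        try:
            # ... modify the store here ...
            tr.close()       # commit the transaction
        finally:
            tr.release()     # rolls back unless close() was reached
    finally:
        lock.release()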
1029 1031 def wlock(self, wait=True):
1030 1032 '''Lock the non-store parts of the repository (everything under
1031 1033 .hg except .hg/store) and return a weak reference to the lock.
1032 1034 Use this before modifying files in .hg.'''
1033 1035 l = self._wlockref and self._wlockref()
1034 1036 if l is not None and l.held:
1035 1037 l.lock()
1036 1038 return l
1037 1039
1038 1040 def unlock():
1039 1041 self.dirstate.write()
1040 1042 self._filecache['dirstate'].refresh()
1041 1043
1042 1044 l = self._lock(self.join("wlock"), wait, unlock,
1043 1045 self.invalidatedirstate, _('working directory of %s') %
1044 1046 self.origroot)
1045 1047 self._wlockref = weakref.ref(l)
1046 1048 return l
1047 1049
1048 1050 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1049 1051 """
1050 1052 commit an individual file as part of a larger transaction
1051 1053 """
1052 1054
1053 1055 fname = fctx.path()
1054 1056 text = fctx.data()
1055 1057 flog = self.file(fname)
1056 1058 fparent1 = manifest1.get(fname, nullid)
1057 1059 fparent2 = fparent2o = manifest2.get(fname, nullid)
1058 1060
1059 1061 meta = {}
1060 1062 copy = fctx.renamed()
1061 1063 if copy and copy[0] != fname:
1062 1064 # Mark the new revision of this file as a copy of another
1063 1065 # file. This copy data will effectively act as a parent
1064 1066 # of this new revision. If this is a merge, the first
1065 1067 # parent will be the nullid (meaning "look up the copy data")
1066 1068 # and the second one will be the other parent. For example:
1067 1069 #
1068 1070 # 0 --- 1 --- 3 rev1 changes file foo
1069 1071 # \ / rev2 renames foo to bar and changes it
1070 1072 # \- 2 -/ rev3 should have bar with all changes and
1071 1073 # should record that bar descends from
1072 1074 # bar in rev2 and foo in rev1
1073 1075 #
1074 1076 # this allows this merge to succeed:
1075 1077 #
1076 1078 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1077 1079 # \ / merging rev3 and rev4 should use bar@rev2
1078 1080 # \- 2 --- 4 as the merge base
1079 1081 #
1080 1082
1081 1083 cfname = copy[0]
1082 1084 crev = manifest1.get(cfname)
1083 1085 newfparent = fparent2
1084 1086
1085 1087 if manifest2: # branch merge
1086 1088 if fparent2 == nullid or crev is None: # copied on remote side
1087 1089 if cfname in manifest2:
1088 1090 crev = manifest2[cfname]
1089 1091 newfparent = fparent1
1090 1092
1091 1093 # find source in nearest ancestor if we've lost track
1092 1094 if not crev:
1093 1095 self.ui.debug(" %s: searching for copy revision for %s\n" %
1094 1096 (fname, cfname))
1095 1097 for ancestor in self[None].ancestors():
1096 1098 if cfname in ancestor:
1097 1099 crev = ancestor[cfname].filenode()
1098 1100 break
1099 1101
1100 1102 if crev:
1101 1103 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1102 1104 meta["copy"] = cfname
1103 1105 meta["copyrev"] = hex(crev)
1104 1106 fparent1, fparent2 = nullid, newfparent
1105 1107 else:
1106 1108 self.ui.warn(_("warning: can't find ancestor for '%s' "
1107 1109 "copied from '%s'!\n") % (fname, cfname))
1108 1110
1109 1111 elif fparent2 != nullid:
1110 1112 # is one parent an ancestor of the other?
1111 1113 fparentancestor = flog.ancestor(fparent1, fparent2)
1112 1114 if fparentancestor == fparent1:
1113 1115 fparent1, fparent2 = fparent2, nullid
1114 1116 elif fparentancestor == fparent2:
1115 1117 fparent2 = nullid
1116 1118
1117 1119 # is the file changed?
1118 1120 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1119 1121 changelist.append(fname)
1120 1122 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1121 1123
1122 1124 # are just the flags changed during merge?
1123 1125 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1124 1126 changelist.append(fname)
1125 1127
1126 1128 return fparent1
1127 1129
1128 1130 @unfilteredmethod
1129 1131 def commit(self, text="", user=None, date=None, match=None, force=False,
1130 1132 editor=False, extra={}):
1131 1133 """Add a new revision to current repository.
1132 1134
1133 1135 Revision information is gathered from the working directory,
1134 1136 match can be used to filter the committed files. If editor is
1135 1137 supplied, it is called to get a commit message.
1136 1138 """
1137 1139
1138 1140 def fail(f, msg):
1139 1141 raise util.Abort('%s: %s' % (f, msg))
1140 1142
1141 1143 if not match:
1142 1144 match = matchmod.always(self.root, '')
1143 1145
1144 1146 if not force:
1145 1147 vdirs = []
1146 1148 match.dir = vdirs.append
1147 1149 match.bad = fail
1148 1150
1149 1151 wlock = self.wlock()
1150 1152 try:
1151 1153 wctx = self[None]
1152 1154 merge = len(wctx.parents()) > 1
1153 1155
1154 1156 if (not force and merge and match and
1155 1157 (match.files() or match.anypats())):
1156 1158 raise util.Abort(_('cannot partially commit a merge '
1157 1159 '(do not specify files or patterns)'))
1158 1160
1159 1161 changes = self.status(match=match, clean=force)
1160 1162 if force:
1161 1163 changes[0].extend(changes[6]) # mq may commit unchanged files
1162 1164
1163 1165 # check subrepos
1164 1166 subs = []
1165 1167 commitsubs = set()
1166 1168 newstate = wctx.substate.copy()
1167 1169 # only manage subrepos and .hgsubstate if .hgsub is present
1168 1170 if '.hgsub' in wctx:
1169 1171 # we'll decide whether to track this ourselves, thanks
1170 1172 if '.hgsubstate' in changes[0]:
1171 1173 changes[0].remove('.hgsubstate')
1172 1174 if '.hgsubstate' in changes[2]:
1173 1175 changes[2].remove('.hgsubstate')
1174 1176
1175 1177 # compare current state to last committed state
1176 1178 # build new substate based on last committed state
1177 1179 oldstate = wctx.p1().substate
1178 1180 for s in sorted(newstate.keys()):
1179 1181 if not match(s):
1180 1182 # ignore working copy, use old state if present
1181 1183 if s in oldstate:
1182 1184 newstate[s] = oldstate[s]
1183 1185 continue
1184 1186 if not force:
1185 1187 raise util.Abort(
1186 1188 _("commit with new subrepo %s excluded") % s)
1187 1189 if wctx.sub(s).dirty(True):
1188 1190 if not self.ui.configbool('ui', 'commitsubrepos'):
1189 1191 raise util.Abort(
1190 1192 _("uncommitted changes in subrepo %s") % s,
1191 1193 hint=_("use --subrepos for recursive commit"))
1192 1194 subs.append(s)
1193 1195 commitsubs.add(s)
1194 1196 else:
1195 1197 bs = wctx.sub(s).basestate()
1196 1198 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1197 1199 if oldstate.get(s, (None, None, None))[1] != bs:
1198 1200 subs.append(s)
1199 1201
1200 1202 # check for removed subrepos
1201 1203 for p in wctx.parents():
1202 1204 r = [s for s in p.substate if s not in newstate]
1203 1205 subs += [s for s in r if match(s)]
1204 1206 if subs:
1205 1207 if (not match('.hgsub') and
1206 1208 '.hgsub' in (wctx.modified() + wctx.added())):
1207 1209 raise util.Abort(
1208 1210 _("can't commit subrepos without .hgsub"))
1209 1211 changes[0].insert(0, '.hgsubstate')
1210 1212
1211 1213 elif '.hgsub' in changes[2]:
1212 1214 # clean up .hgsubstate when .hgsub is removed
1213 1215 if ('.hgsubstate' in wctx and
1214 1216 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1215 1217 changes[2].insert(0, '.hgsubstate')
1216 1218
1217 1219 # make sure all explicit patterns are matched
1218 1220 if not force and match.files():
1219 1221 matched = set(changes[0] + changes[1] + changes[2])
1220 1222
1221 1223 for f in match.files():
1222 1224 f = self.dirstate.normalize(f)
1223 1225 if f == '.' or f in matched or f in wctx.substate:
1224 1226 continue
1225 1227 if f in changes[3]: # missing
1226 1228 fail(f, _('file not found!'))
1227 1229 if f in vdirs: # visited directory
1228 1230 d = f + '/'
1229 1231 for mf in matched:
1230 1232 if mf.startswith(d):
1231 1233 break
1232 1234 else:
1233 1235 fail(f, _("no match under directory!"))
1234 1236 elif f not in self.dirstate:
1235 1237 fail(f, _("file not tracked!"))
1236 1238
1237 1239 cctx = context.workingctx(self, text, user, date, extra, changes)
1238 1240
1239 1241 if (not force and not extra.get("close") and not merge
1240 1242 and not cctx.files()
1241 1243 and wctx.branch() == wctx.p1().branch()):
1242 1244 return None
1243 1245
1244 1246 if merge and cctx.deleted():
1245 1247 raise util.Abort(_("cannot commit merge with missing files"))
1246 1248
1247 1249 ms = mergemod.mergestate(self)
1248 1250 for f in changes[0]:
1249 1251 if f in ms and ms[f] == 'u':
1250 1252 raise util.Abort(_("unresolved merge conflicts "
1251 1253 "(see hg help resolve)"))
1252 1254
1253 1255 if editor:
1254 1256 cctx._text = editor(self, cctx, subs)
1255 1257 edited = (text != cctx._text)
1256 1258
1257 1259 # commit subs and write new state
1258 1260 if subs:
1259 1261 for s in sorted(commitsubs):
1260 1262 sub = wctx.sub(s)
1261 1263 self.ui.status(_('committing subrepository %s\n') %
1262 1264 subrepo.subrelpath(sub))
1263 1265 sr = sub.commit(cctx._text, user, date)
1264 1266 newstate[s] = (newstate[s][0], sr)
1265 1267 subrepo.writestate(self, newstate)
1266 1268
1267 1269 # Save commit message in case this transaction gets rolled back
1268 1270 # (e.g. by a pretxncommit hook). Leave the content alone on
1269 1271 # the assumption that the user will use the same editor again.
1270 1272 msgfn = self.savecommitmessage(cctx._text)
1271 1273
1272 1274 p1, p2 = self.dirstate.parents()
1273 1275 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1274 1276 try:
1275 1277 self.hook("precommit", throw=True, parent1=hookp1,
1276 1278 parent2=hookp2)
1277 1279 ret = self.commitctx(cctx, True)
1278 1280 except: # re-raises
1279 1281 if edited:
1280 1282 self.ui.write(
1281 1283 _('note: commit message saved in %s\n') % msgfn)
1282 1284 raise
1283 1285
1284 1286 # update bookmarks, dirstate and mergestate
1285 1287 bookmarks.update(self, [p1, p2], ret)
1286 1288 cctx.markcommitted(ret)
1287 1289 ms.reset()
1288 1290 finally:
1289 1291 wlock.release()
1290 1292
1291 1293 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1292 1294 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1293 1295 self._afterlock(commithook)
1294 1296 return ret
1295 1297
1296 1298 @unfilteredmethod
1297 1299 def commitctx(self, ctx, error=False):
1298 1300 """Add a new revision to current repository.
1299 1301 Revision information is passed via the context argument.
1300 1302 """
1301 1303
1302 1304 tr = lock = None
1303 1305 removed = list(ctx.removed())
1304 1306 p1, p2 = ctx.p1(), ctx.p2()
1305 1307 user = ctx.user()
1306 1308
1307 1309 lock = self.lock()
1308 1310 try:
1309 1311 tr = self.transaction("commit")
1310 1312 trp = weakref.proxy(tr)
1311 1313
1312 1314 if ctx.files():
1313 1315 m1 = p1.manifest().copy()
1314 1316 m2 = p2.manifest()
1315 1317
1316 1318 # check in files
1317 1319 new = {}
1318 1320 changed = []
1319 1321 linkrev = len(self)
1320 1322 for f in sorted(ctx.modified() + ctx.added()):
1321 1323 self.ui.note(f + "\n")
1322 1324 try:
1323 1325 fctx = ctx[f]
1324 1326 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1325 1327 changed)
1326 1328 m1.set(f, fctx.flags())
1327 1329 except OSError, inst:
1328 1330 self.ui.warn(_("trouble committing %s!\n") % f)
1329 1331 raise
1330 1332 except IOError, inst:
1331 1333 errcode = getattr(inst, 'errno', errno.ENOENT)
1332 1334 if error or errcode and errcode != errno.ENOENT:
1333 1335 self.ui.warn(_("trouble committing %s!\n") % f)
1334 1336 raise
1335 1337 else:
1336 1338 removed.append(f)
1337 1339
1338 1340 # update manifest
1339 1341 m1.update(new)
1340 1342 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1341 1343 drop = [f for f in removed if f in m1]
1342 1344 for f in drop:
1343 1345 del m1[f]
1344 1346 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1345 1347 p2.manifestnode(), (new, drop))
1346 1348 files = changed + removed
1347 1349 else:
1348 1350 mn = p1.manifestnode()
1349 1351 files = []
1350 1352
1351 1353 # update changelog
1352 1354 self.changelog.delayupdate()
1353 1355 n = self.changelog.add(mn, files, ctx.description(),
1354 1356 trp, p1.node(), p2.node(),
1355 1357 user, ctx.date(), ctx.extra().copy())
1356 1358 p = lambda: self.changelog.writepending() and self.root or ""
1357 1359 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1358 1360 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1359 1361 parent2=xp2, pending=p)
1360 1362 self.changelog.finalize(trp)
1361 1363 # set the new commit in its proper phase
1362 1364 targetphase = phases.newcommitphase(self.ui)
1363 1365 if targetphase:
1364 1366 # retractboundary does not alter the parent changesets.
1365 1367 # if a parent has a higher phase, the resulting phase will
1366 1368 # be compliant anyway
1367 1369 #
1368 1370 # if the minimal phase was 0 we don't need to retract anything
1369 1371 phases.retractboundary(self, targetphase, [n])
1370 1372 tr.close()
1371 1373 branchmap.updatecache(self.filtered('served'))
1372 1374 return n
1373 1375 finally:
1374 1376 if tr:
1375 1377 tr.release()
1376 1378 lock.release()
1377 1379
1378 1380 @unfilteredmethod
1379 1381 def destroying(self):
1380 1382 '''Inform the repository that nodes are about to be destroyed.
1381 1383 Intended for use by strip and rollback, so there's a common
1382 1384 place for anything that has to be done before destroying history.
1383 1385
1384 1386 This is mostly useful for saving state that is in memory and waiting
1385 1387 to be flushed when the current lock is released. Because a call to
1386 1388 destroyed is imminent, the repo will be invalidated causing those
1387 1389 changes to stay in memory (waiting for the next unlock), or vanish
1388 1390 completely.
1389 1391 '''
1390 1392 # When using the same lock to commit and strip, the phasecache is left
1391 1393 # dirty after committing. Then when we strip, the repo is invalidated,
1392 1394 # causing those changes to disappear.
1393 1395 if '_phasecache' in vars(self):
1394 1396 self._phasecache.write()
1395 1397
1396 1398 @unfilteredmethod
1397 1399 def destroyed(self):
1398 1400 '''Inform the repository that nodes have been destroyed.
1399 1401 Intended for use by strip and rollback, so there's a common
1400 1402 place for anything that has to be done after destroying history.
1401 1403 '''
1402 1404 # When one tries to:
1403 1405 # 1) destroy nodes thus calling this method (e.g. strip)
1404 1406 # 2) use phasecache somewhere (e.g. commit)
1405 1407 #
1406 1408 # then 2) will fail because the phasecache contains nodes that were
1407 1409 # removed. We can either remove phasecache from the filecache,
1408 1410 # causing it to reload next time it is accessed, or simply filter
1409 1411 # the removed nodes now and write the updated cache.
1410 1412 self._phasecache.filterunknown(self)
1411 1413 self._phasecache.write()
1412 1414
1413 1415 # update the 'served' branch cache to help read-only server processes
1414 1416 # Thanks to branchcache collaboration this is done from the nearest
1415 1417 # filtered subset and it is expected to be fast.
1416 1418 branchmap.updatecache(self.filtered('served'))
1417 1419
1418 1420 # Ensure the persistent tag cache is updated. Doing it now
1419 1421 # means that the tag cache only has to worry about destroyed
1420 1422 # heads immediately after a strip/rollback. That in turn
1421 1423 # guarantees that "cachetip == currenttip" (comparing both rev
1422 1424 # and node) always means no nodes have been added or destroyed.
1423 1425
1424 1426 # XXX this is suboptimal when qrefresh'ing: we strip the current
1425 1427 # head, refresh the tag cache, then immediately add a new head.
1426 1428 # But I think doing it this way is necessary for the "instant
1427 1429 # tag cache retrieval" case to work.
1428 1430 self.invalidate()
1429 1431
1430 1432 def walk(self, match, node=None):
1431 1433 '''
1432 1434 walk recursively through the directory tree or a given
1433 1435 changeset, finding all files matched by the match
1434 1436 function
1435 1437 '''
1436 1438 return self[node].walk(match)
1437 1439
1438 1440 def status(self, node1='.', node2=None, match=None,
1439 1441 ignored=False, clean=False, unknown=False,
1440 1442 listsubrepos=False):
1441 1443 """return status of files between two nodes or node and working
1442 1444 directory.
1443 1445
1444 1446 If node1 is None, use the first dirstate parent instead.
1445 1447 If node2 is None, compare node1 with working directory.
1446 1448 """
1447 1449
1448 1450 def mfmatches(ctx):
1449 1451 mf = ctx.manifest().copy()
1450 1452 if match.always():
1451 1453 return mf
1452 1454 for fn in mf.keys():
1453 1455 if not match(fn):
1454 1456 del mf[fn]
1455 1457 return mf
1456 1458
1457 1459 if isinstance(node1, context.changectx):
1458 1460 ctx1 = node1
1459 1461 else:
1460 1462 ctx1 = self[node1]
1461 1463 if isinstance(node2, context.changectx):
1462 1464 ctx2 = node2
1463 1465 else:
1464 1466 ctx2 = self[node2]
1465 1467
1466 1468 working = ctx2.rev() is None
1467 1469 parentworking = working and ctx1 == self['.']
1468 1470 match = match or matchmod.always(self.root, self.getcwd())
1469 1471 listignored, listclean, listunknown = ignored, clean, unknown
1470 1472
1471 1473 # load earliest manifest first for caching reasons
1472 1474 if not working and ctx2.rev() < ctx1.rev():
1473 1475 ctx2.manifest()
1474 1476
1475 1477 if not parentworking:
1476 1478 def bad(f, msg):
1477 1479 # 'f' may be a directory pattern from 'match.files()',
1478 1480 # so 'f not in ctx1' is not enough
1479 1481 if f not in ctx1 and f not in ctx1.dirs():
1480 1482 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1481 1483 match.bad = bad
1482 1484
1483 1485 if working: # we need to scan the working dir
1484 1486 subrepos = []
1485 1487 if '.hgsub' in self.dirstate:
1486 1488 subrepos = sorted(ctx2.substate)
1487 1489 s = self.dirstate.status(match, subrepos, listignored,
1488 1490 listclean, listunknown)
1489 1491 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1490 1492
1491 1493 # check for any possibly clean files
1492 1494 if parentworking and cmp:
1493 1495 fixup = []
1494 1496 # do a full compare of any files that might have changed
1495 1497 for f in sorted(cmp):
1496 1498 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1497 1499 or ctx1[f].cmp(ctx2[f])):
1498 1500 modified.append(f)
1499 1501 else:
1500 1502 fixup.append(f)
1501 1503
1502 1504 # update dirstate for files that are actually clean
1503 1505 if fixup:
1504 1506 if listclean:
1505 1507 clean += fixup
1506 1508
1507 1509 try:
1508 1510 # updating the dirstate is optional
1509 1511 # so we don't wait on the lock
1510 1512 wlock = self.wlock(False)
1511 1513 try:
1512 1514 for f in fixup:
1513 1515 self.dirstate.normal(f)
1514 1516 finally:
1515 1517 wlock.release()
1516 1518 except error.LockError:
1517 1519 pass
1518 1520
1519 1521 if not parentworking:
1520 1522 mf1 = mfmatches(ctx1)
1521 1523 if working:
1522 1524 # we are comparing working dir against non-parent
1523 1525 # generate a pseudo-manifest for the working dir
1524 1526 mf2 = mfmatches(self['.'])
1525 1527 for f in cmp + modified + added:
1526 1528 mf2[f] = None
1527 1529 mf2.set(f, ctx2.flags(f))
1528 1530 for f in removed:
1529 1531 if f in mf2:
1530 1532 del mf2[f]
1531 1533 else:
1532 1534 # we are comparing two revisions
1533 1535 deleted, unknown, ignored = [], [], []
1534 1536 mf2 = mfmatches(ctx2)
1535 1537
1536 1538 modified, added, clean = [], [], []
1537 1539 withflags = mf1.withflags() | mf2.withflags()
1538 1540 for fn, mf2node in mf2.iteritems():
1539 1541 if fn in mf1:
1540 1542 if (fn not in deleted and
1541 1543 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1542 1544 (mf1[fn] != mf2node and
1543 1545 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1544 1546 modified.append(fn)
1545 1547 elif listclean:
1546 1548 clean.append(fn)
1547 1549 del mf1[fn]
1548 1550 elif fn not in deleted:
1549 1551 added.append(fn)
1550 1552 removed = mf1.keys()
1551 1553
1552 1554 if working and modified and not self.dirstate._checklink:
1553 1555 # Symlink placeholders may get non-symlink-like contents
1554 1556 # via user error or dereferencing by NFS or Samba servers,
1555 1557 # so we filter out any placeholders that don't look like a
1556 1558 # symlink
1557 1559 sane = []
1558 1560 for f in modified:
1559 1561 if ctx2.flags(f) == 'l':
1560 1562 d = ctx2[f].data()
1561 1563 if len(d) >= 1024 or '\n' in d or util.binary(d):
1562 1564 self.ui.debug('ignoring suspect symlink placeholder'
1563 1565 ' "%s"\n' % f)
1564 1566 continue
1565 1567 sane.append(f)
1566 1568 modified = sane
1567 1569
1568 1570 r = modified, added, removed, deleted, unknown, ignored, clean
1569 1571
1570 1572 if listsubrepos:
1571 1573 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1572 1574 if working:
1573 1575 rev2 = None
1574 1576 else:
1575 1577 rev2 = ctx2.substate[subpath][1]
1576 1578 try:
1577 1579 submatch = matchmod.narrowmatcher(subpath, match)
1578 1580 s = sub.status(rev2, match=submatch, ignored=listignored,
1579 1581 clean=listclean, unknown=listunknown,
1580 1582 listsubrepos=True)
1581 1583 for rfiles, sfiles in zip(r, s):
1582 1584 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1583 1585 except error.LookupError:
1584 1586 self.ui.status(_("skipping missing subrepository: %s\n")
1585 1587 % subpath)
1586 1588
1587 1589 for l in r:
1588 1590 l.sort()
1589 1591 return r
1590 1592
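A minimal sketch of consuming the seven sorted lists packed into the status result above; `repo` is assumed to be an existing localrepository, and the keyword names follow the `ignored, clean, unknown` parameters seen at the top of this method:

    # r = modified, added, removed, deleted, unknown, ignored, clean
    labels = ('modified', 'added', 'removed', 'deleted',
              'unknown', 'ignored', 'clean')
    st = repo.status(ignored=True, clean=True, unknown=True)
    for label, files in zip(labels, st):
        print '%s: %s' % (label, ', '.join(files))
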
1591 1593 def heads(self, start=None):
1592 1594 heads = self.changelog.heads(start)
1593 1595 # sort the output in rev descending order
1594 1596 return sorted(heads, key=self.changelog.rev, reverse=True)
1595 1597
1596 1598 def branchheads(self, branch=None, start=None, closed=False):
1597 1599 '''return a (possibly filtered) list of heads for the given branch
1598 1600
1599 1601 Heads are returned in topological order, from newest to oldest.
1600 1602 If branch is None, use the dirstate branch.
1601 1603 If start is not None, return only heads reachable from start.
1602 1604 If closed is True, return heads that are marked as closed as well.
1603 1605 '''
1604 1606 if branch is None:
1605 1607 branch = self[None].branch()
1606 1608 branches = self.branchmap()
1607 1609 if branch not in branches:
1608 1610 return []
1609 1611 # the cache returns heads ordered lowest to highest
1610 1612 bheads = list(reversed(branches[branch]))
1611 1613 if start is not None:
1612 1614 # filter out the heads that cannot be reached from startrev
1613 1615 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1614 1616 bheads = [h for h in bheads if h in fbheads]
1615 1617 if not closed:
1616 1618 bheads = [h for h in bheads if not self[h].closesbranch()]
1617 1619 return bheads
1618 1620
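A short usage sketch for branchheads(); the branch name and the `repo` object are assumptions for illustration:

    # newest-first heads of 'default', omitting heads that close the branch
    for node in repo.branchheads('default'):
        ctx = repo[node]
        print ctx.rev(), ctx.description().splitlines()[0]
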
1619 1621 def branches(self, nodes):
1620 1622 if not nodes:
1621 1623 nodes = [self.changelog.tip()]
1622 1624 b = []
1623 1625 for n in nodes:
1624 1626 t = n
1625 1627 while True:
1626 1628 p = self.changelog.parents(n)
1627 1629 if p[1] != nullid or p[0] == nullid:
1628 1630 b.append((t, n, p[0], p[1]))
1629 1631 break
1630 1632 n = p[0]
1631 1633 return b
1632 1634
1633 1635 def between(self, pairs):
1634 1636 r = []
1635 1637
1636 1638 for top, bottom in pairs:
1637 1639 n, l, i = top, [], 0
1638 1640 f = 1
1639 1641
1640 1642 while n != bottom and n != nullid:
1641 1643 p = self.changelog.parents(n)[0]
1642 1644 if i == f:
1643 1645 l.append(n)
1644 1646 f = f * 2
1645 1647 n = p
1646 1648 i += 1
1647 1649
1648 1650 r.append(l)
1649 1651
1650 1652 return r
1651 1653
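The loop above samples each top-to-bottom chain at exponentially growing distances (1, 2, 4, 8, ... parents below top), which keeps the reply small on long histories. The same sampling over a plain list, as a self-contained sketch:

    def sample_between(chain):
        # keep elements 1, 2, 4, 8, ... steps from the start,
        # mirroring the i == f / f *= 2 logic in between()
        l, f, i = [], 1, 0
        for n in chain:
            if i == f:
                l.append(n)
                f *= 2
            i += 1
        return l

    # sample_between(range(20)) -> [1, 2, 4, 8, 16]
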
1652 1654 def pull(self, remote, heads=None, force=False):
1653 1655 # don't open a transaction for nothing or you break future useful
1654 1656 # rollback calls
1655 1657 tr = None
1656 1658 trname = 'pull\n' + util.hidepassword(remote.url())
1657 1659 lock = self.lock()
1658 1660 try:
1659 1661 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1660 1662 force=force)
1661 1663 common, fetch, rheads = tmp
1662 1664 if not fetch:
1663 1665 self.ui.status(_("no changes found\n"))
1664 1666 added = []
1665 1667 result = 0
1666 1668 else:
1667 1669 tr = self.transaction(trname)
1668 1670 if heads is None and list(common) == [nullid]:
1669 1671 self.ui.status(_("requesting all changes\n"))
1670 1672 elif heads is None and remote.capable('changegroupsubset'):
1671 1673 # issue1320, avoid a race if remote changed after discovery
1672 1674 heads = rheads
1673 1675
1674 1676 if remote.capable('getbundle'):
1675 1677 cg = remote.getbundle('pull', common=common,
1676 1678 heads=heads or rheads)
1677 1679 elif heads is None:
1678 1680 cg = remote.changegroup(fetch, 'pull')
1679 1681 elif not remote.capable('changegroupsubset'):
1680 1682 raise util.Abort(_("partial pull cannot be done because "
1681 1683 "other repository doesn't support "
1682 1684 "changegroupsubset."))
1683 1685 else:
1684 1686 cg = remote.changegroupsubset(fetch, heads, 'pull')
1685 1687 # we use the unfiltered changelog here because hidden revisions must
1686 1688 # be taken into account for phase synchronization. They may
1687 1689 # become public and visible again.
1688 1690 cl = self.unfiltered().changelog
1689 1691 clstart = len(cl)
1690 1692 result = self.addchangegroup(cg, 'pull', remote.url())
1691 1693 clend = len(cl)
1692 1694 added = [cl.node(r) for r in xrange(clstart, clend)]
1693 1695
1694 1696 # compute target subset
1695 1697 if heads is None:
1696 1698 # We pulled everything possible
1697 1699 # sync on everything common
1698 1700 subset = common + added
1699 1701 else:
1700 1702 # We pulled a specific subset
1701 1703 # sync on this subset
1702 1704 subset = heads
1703 1705
1704 1706 # Get phases data from the remote
1705 1707 remotephases = remote.listkeys('phases')
1706 1708 publishing = bool(remotephases.get('publishing', False))
1707 1709 if remotephases and not publishing:
1708 1710 # remote is new and non-publishing
1709 1711 pheads, _dr = phases.analyzeremotephases(self, subset,
1710 1712 remotephases)
1711 1713 phases.advanceboundary(self, phases.public, pheads)
1712 1714 phases.advanceboundary(self, phases.draft, subset)
1713 1715 else:
1714 1716 # Remote is old or publishing; all common changesets
1715 1717 # should be seen as public
1716 1718 phases.advanceboundary(self, phases.public, subset)
1717 1719
1718 1720 if obsolete._enabled:
1719 1721 self.ui.debug('fetching remote obsolete markers\n')
1720 1722 remoteobs = remote.listkeys('obsolete')
1721 1723 if 'dump0' in remoteobs:
1722 1724 if tr is None:
1723 1725 tr = self.transaction(trname)
1724 1726 for key in sorted(remoteobs, reverse=True):
1725 1727 if key.startswith('dump'):
1726 1728 data = base85.b85decode(remoteobs[key])
1727 1729 self.obsstore.mergemarkers(tr, data)
1728 1730 self.invalidatevolatilesets()
1729 1731 if tr is not None:
1730 1732 tr.close()
1731 1733 finally:
1732 1734 if tr is not None:
1733 1735 tr.release()
1734 1736 lock.release()
1735 1737
1736 1738 return result
1737 1739
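The phase synchronization above boils down to one decision; restated here as a standalone sketch (the function name and the tuple encoding are illustrative, not part of the API):

    def pulled_phase_targets(remotephases, subset, pheads):
        # a non-publishing remote that speaks the phases protocol
        # lets us keep its draft changesets draft locally
        publishing = bool(remotephases.get('publishing', False))
        if remotephases and not publishing:
            return [('public', pheads), ('draft', subset)]
        # old or publishing remote: everything common becomes public
        return [('public', subset)]
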
1738 1740 def checkpush(self, force, revs):
1739 1741 """Extensions can override this function if additional checks have
1740 1742 to be performed before pushing, or call it if they override push
1741 1743 command.
1742 1744 """
1743 1745 pass
1744 1746
1745 1747 def push(self, remote, force=False, revs=None, newbranch=False):
1746 1748 '''Push outgoing changesets (limited by revs) from the current
1747 1749 repository to remote. Return an integer:
1748 1750 - None means nothing to push
1749 1751 - 0 means HTTP error
1750 1752 - 1 means we pushed and remote head count is unchanged *or*
1751 1753 we have outgoing changesets but refused to push
1752 1754 - other values as described by addchangegroup()
1753 1755 '''
1754 1756 # there are two ways to push to remote repo:
1755 1757 #
1756 1758 # addchangegroup assumes local user can lock remote
1757 1759 # repo (local filesystem, old ssh servers).
1758 1760 #
1759 1761 # unbundle assumes local user cannot lock remote repo (new ssh
1760 1762 # servers, http servers).
1761 1763
1762 1764 if not remote.canpush():
1763 1765 raise util.Abort(_("destination does not support push"))
1764 1766 unfi = self.unfiltered()
1765 1767 # get local lock as we might write phase data
1766 1768 locallock = self.lock()
1767 1769 try:
1768 1770 self.checkpush(force, revs)
1769 1771 lock = None
1770 1772 unbundle = remote.capable('unbundle')
1771 1773 if not unbundle:
1772 1774 lock = remote.lock()
1773 1775 try:
1774 1776 # discovery
1775 1777 fci = discovery.findcommonincoming
1776 1778 commoninc = fci(unfi, remote, force=force)
1777 1779 common, inc, remoteheads = commoninc
1778 1780 fco = discovery.findcommonoutgoing
1779 1781 outgoing = fco(unfi, remote, onlyheads=revs,
1780 1782 commoninc=commoninc, force=force)
1781 1783
1782 1784
1783 1785 if not outgoing.missing:
1784 1786 # nothing to push
1785 1787 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1786 1788 ret = None
1787 1789 else:
1788 1790 # something to push
1789 1791 if not force:
1790 1792 # if self.obsstore is empty --> no obsolete markers,
1791 1793 # so we can skip the iteration
1792 1794 if unfi.obsstore:
1793 1795 # these messages are defined here for 80-char-limit reasons
1794 1796 mso = _("push includes obsolete changeset: %s!")
1795 1797 mst = "push includes %s changeset: %s!"
1796 1798 # plain versions for i18n tool to detect them
1797 1799 _("push includes unstable changeset: %s!")
1798 1800 _("push includes bumped changeset: %s!")
1799 1801 _("push includes divergent changeset: %s!")
1800 1802 # If there is at least one obsolete or unstable
1801 1803 # changeset in missing, then at least one of the
1802 1804 # missingheads will be obsolete or unstable, so
1803 1805 # checking the heads only is enough
1804 1806 for node in outgoing.missingheads:
1805 1807 ctx = unfi[node]
1806 1808 if ctx.obsolete():
1807 1809 raise util.Abort(mso % ctx)
1808 1810 elif ctx.troubled():
1809 1811 raise util.Abort(_(mst)
1810 1812 % (ctx.troubles()[0],
1811 1813 ctx))
1812 1814 discovery.checkheads(unfi, remote, outgoing,
1813 1815 remoteheads, newbranch,
1814 1816 bool(inc))
1815 1817
1816 1818 # create a changegroup from local
1817 1819 if revs is None and not outgoing.excluded:
1818 1820 # push everything,
1819 1821 # use the fast path, no race possible on push
1820 1822 cg = self._changegroup(outgoing.missing, 'push')
1821 1823 else:
1822 1824 cg = self.getlocalbundle('push', outgoing)
1823 1825
1824 1826 # apply changegroup to remote
1825 1827 if unbundle:
1826 1828 # local repo finds heads on server, finds out what
1827 1829 # revs it must push. once revs transferred, if server
1828 1830 # finds it has different heads (someone else won
1829 1831 # commit/push race), server aborts.
1830 1832 if force:
1831 1833 remoteheads = ['force']
1832 1834 # ssh: return remote's addchangegroup()
1833 1835 # http: return remote's addchangegroup() or 0 for error
1834 1836 ret = remote.unbundle(cg, remoteheads, 'push')
1835 1837 else:
1836 1838 # we return an integer indicating remote head count
1837 1839 # change
1838 1840 ret = remote.addchangegroup(cg, 'push', self.url())
1839 1841
1840 1842 if ret:
1841 1843 # push succeeded, synchronize the target of the push
1842 1844 cheads = outgoing.missingheads
1843 1845 elif revs is None:
1844 1846 # The whole push failed; synchronize on all common
1845 1847 cheads = outgoing.commonheads
1846 1848 else:
1847 1849 # I want cheads = heads(::missingheads and ::commonheads)
1848 1850 # (missingheads is revs with secret changeset filtered out)
1849 1851 #
1850 1852 # This can be expressed as:
1851 1853 # cheads = ( (missingheads and ::commonheads)
1852 1854 #          + (commonheads and ::missingheads)
1853 1855 #          )
1854 1856 #
1855 1857 # while trying to push we already computed the following:
1856 1858 # common = (::commonheads)
1857 1859 # missing = ((commonheads::missingheads) - commonheads)
1858 1860 #
1859 1861 # We can pick:
1860 1862 # * missingheads part of common (::commonheads)
1861 1863 common = set(outgoing.common)
1862 1864 cheads = [node for node in revs if node in common]
1863 1865 # and
1864 1866 # * commonheads parents on missing
1865 1867 revset = unfi.set('%ln and parents(roots(%ln))',
1866 1868 outgoing.commonheads,
1867 1869 outgoing.missing)
1868 1870 cheads.extend(c.node() for c in revset)
1869 1871 # even when we don't push, exchanging phase data is useful
1870 1872 remotephases = remote.listkeys('phases')
1871 1873 if (self.ui.configbool('ui', '_usedassubrepo', False)
1872 1874 and remotephases # server supports phases
1873 1875 and ret is None # nothing was pushed
1874 1876 and remotephases.get('publishing', False)):
1875 1877 # When:
1876 1878 # - this is a subrepo push
1877 1879 # - and the remote supports phases
1878 1880 # - and no changeset was pushed
1879 1881 # - and the remote is publishing
1880 1882 # We may be in the issue 3871 case!
1881 1883 # We drop the phase synchronisation normally done as a
1882 1884 # courtesy, which would publish changesets that are still
1883 1885 # draft locally on the remote.
1884 1886 remotephases = {'publishing': 'True'}
1885 1887 if not remotephases: # old server or public only repo
1886 1888 phases.advanceboundary(self, phases.public, cheads)
1887 1889 # don't push any phase data as there is nothing to push
1888 1890 else:
1889 1891 ana = phases.analyzeremotephases(self, cheads, remotephases)
1890 1892 pheads, droots = ana
1891 1893 ### Apply remote phase on local
1892 1894 if remotephases.get('publishing', False):
1893 1895 phases.advanceboundary(self, phases.public, cheads)
1894 1896 else: # publish = False
1895 1897 phases.advanceboundary(self, phases.public, pheads)
1896 1898 phases.advanceboundary(self, phases.draft, cheads)
1897 1899 ### Apply local phase on remote
1898 1900
1899 1901 # Get the list of all revs that are draft on remote but public here.
1900 1902 # XXX Beware that the revset breaks if droots is not strictly
1901 1903 # XXX roots; we may want to ensure it is, but that is costly
1902 1904 outdated = unfi.set('heads((%ln::%ln) and public())',
1903 1905 droots, cheads)
1904 1906 for newremotehead in outdated:
1905 1907 r = remote.pushkey('phases',
1906 1908 newremotehead.hex(),
1907 1909 str(phases.draft),
1908 1910 str(phases.public))
1909 1911 if not r:
1910 1912 self.ui.warn(_('updating %s to public failed!\n')
1911 1913 % newremotehead)
1912 1914 self.ui.debug('try to push obsolete markers to remote\n')
1913 1915 if (obsolete._enabled and self.obsstore and
1914 1916 'obsolete' in remote.listkeys('namespaces')):
1915 1917 rslts = []
1916 1918 remotedata = self.listkeys('obsolete')
1917 1919 for key in sorted(remotedata, reverse=True):
1918 1920 # reverse sort to ensure we end with dump0
1919 1921 data = remotedata[key]
1920 1922 rslts.append(remote.pushkey('obsolete', key, '', data))
1921 1923 if [r for r in rslts if not r]:
1922 1924 msg = _('failed to push some obsolete markers!\n')
1923 1925 self.ui.warn(msg)
1924 1926 finally:
1925 1927 if lock is not None:
1926 1928 lock.release()
1927 1929 finally:
1928 1930 locallock.release()
1929 1931
1930 1932 self.ui.debug("checking for updated bookmarks\n")
1931 1933 rb = remote.listkeys('bookmarks')
1932 1934 for k in rb.keys():
1933 1935 if k in unfi._bookmarks:
1934 1936 nr, nl = rb[k], hex(self._bookmarks[k])
1935 1937 if nr in unfi:
1936 1938 cr = unfi[nr]
1937 1939 cl = unfi[nl]
1938 1940 if bookmarks.validdest(unfi, cr, cl):
1939 1941 r = remote.pushkey('bookmarks', k, nr, nl)
1940 1942 if r:
1941 1943 self.ui.status(_("updating bookmark %s\n") % k)
1942 1944 else:
1943 1945 self.ui.warn(_('updating bookmark %s'
1944 1946 ' failed!\n') % k)
1945 1947
1946 1948 return ret
1947 1949
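The return contract from the push() docstring, spelled out as a small helper (hypothetical, not part of localrepo):

    def describe_push(ret):
        # mirrors the docstring of push() above
        if ret is None:
            return 'nothing to push'
        if ret == 0:
            return 'HTTP error'
        if ret == 1:
            return 'pushed; remote head count unchanged, or push refused'
        return 'remote head count changed (addchangegroup code %d)' % ret
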
1948 1950 def changegroupinfo(self, nodes, source):
1949 1951 if self.ui.verbose or source == 'bundle':
1950 1952 self.ui.status(_("%d changesets found\n") % len(nodes))
1951 1953 if self.ui.debugflag:
1952 1954 self.ui.debug("list of changesets:\n")
1953 1955 for node in nodes:
1954 1956 self.ui.debug("%s\n" % hex(node))
1955 1957
1956 1958 def changegroupsubset(self, bases, heads, source):
1957 1959 """Compute a changegroup consisting of all the nodes that are
1958 1960 descendants of any of the bases and ancestors of any of the heads.
1959 1961 Return a chunkbuffer object whose read() method will return
1960 1962 successive changegroup chunks.
1961 1963
1962 1964 It is fairly complex as determining which filenodes and which
1963 1965 manifest nodes need to be included for the changeset to be complete
1964 1966 is non-trivial.
1965 1967
1966 1968 Another wrinkle is doing the reverse, figuring out which changeset in
1967 1969 the changegroup a particular filenode or manifestnode belongs to.
1968 1970 """
1969 1971 cl = self.changelog
1970 1972 if not bases:
1971 1973 bases = [nullid]
1972 1974 csets, bases, heads = cl.nodesbetween(bases, heads)
1973 1975 # We assume that all ancestors of bases are known
1974 1976 common = cl.ancestors([cl.rev(n) for n in bases])
1975 1977 return self._changegroupsubset(common, csets, heads, source)
1976 1978
1977 1979 def getlocalbundle(self, source, outgoing):
1978 1980 """Like getbundle, but taking a discovery.outgoing as an argument.
1979 1981
1980 1982 This is only implemented for local repos and reuses potentially
1981 1983 precomputed sets in outgoing."""
1982 1984 if not outgoing.missing:
1983 1985 return None
1984 1986 return self._changegroupsubset(outgoing.common,
1985 1987 outgoing.missing,
1986 1988 outgoing.missingheads,
1987 1989 source)
1988 1990
1989 1991 def getbundle(self, source, heads=None, common=None):
1990 1992 """Like changegroupsubset, but returns the set difference between the
1991 1993 ancestors of heads and the ancestors common.
1992 1994
1993 1995 If heads is None, use the local heads. If common is None, use [nullid].
1994 1996
1995 1997 The nodes in common might not all be known locally due to the way the
1996 1998 current discovery protocol works.
1997 1999 """
1998 2000 cl = self.changelog
1999 2001 if common:
2000 2002 hasnode = cl.hasnode
2001 2003 common = [n for n in common if hasnode(n)]
2002 2004 else:
2003 2005 common = [nullid]
2004 2006 if not heads:
2005 2007 heads = cl.heads()
2006 2008 return self.getlocalbundle(source,
2007 2009 discovery.outgoing(cl, common, heads))
2008 2010
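How the three bundle entry points relate, as a hedged sketch assuming a `repo` object and this module's own imports (`discovery`, `nullid`):

    cl = repo.changelog
    # precompute the outgoing set once and reuse it ...
    out = discovery.outgoing(cl, [nullid], cl.heads())
    cg1 = repo.getlocalbundle('push', out)
    # ... or let getbundle()/changegroupsubset() build it internally
    cg2 = repo.getbundle('pull')
    cg3 = repo.changegroupsubset([nullid], cl.heads(), 'pull')
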
2009 2011 @unfilteredmethod
2010 2012 def _changegroupsubset(self, commonrevs, csets, heads, source):
2011 2013
2012 2014 cl = self.changelog
2013 2015 mf = self.manifest
2014 2016 mfs = {} # needed manifests
2015 2017 fnodes = {} # needed file nodes
2016 2018 changedfiles = set()
2017 2019 fstate = ['', {}]
2018 2020 count = [0, 0]
2019 2021
2020 2022 # can we go through the fast path ?
2021 2023 heads.sort()
2022 2024 if heads == sorted(self.heads()):
2023 2025 return self._changegroup(csets, source)
2024 2026
2025 2027 # slow path
2026 2028 self.hook('preoutgoing', throw=True, source=source)
2027 2029 self.changegroupinfo(csets, source)
2028 2030
2029 2031 # filter any nodes that claim to be part of the known set
2030 2032 def prune(revlog, missing):
2031 2033 rr, rl = revlog.rev, revlog.linkrev
2032 2034 return [n for n in missing
2033 2035 if rl(rr(n)) not in commonrevs]
2034 2036
2035 2037 progress = self.ui.progress
2036 2038 _bundling = _('bundling')
2037 2039 _changesets = _('changesets')
2038 2040 _manifests = _('manifests')
2039 2041 _files = _('files')
2040 2042
2041 2043 def lookup(revlog, x):
2042 2044 if revlog == cl:
2043 2045 c = cl.read(x)
2044 2046 changedfiles.update(c[3])
2045 2047 mfs.setdefault(c[0], x)
2046 2048 count[0] += 1
2047 2049 progress(_bundling, count[0],
2048 2050 unit=_changesets, total=count[1])
2049 2051 return x
2050 2052 elif revlog == mf:
2051 2053 clnode = mfs[x]
2052 2054 mdata = mf.readfast(x)
2053 2055 for f, n in mdata.iteritems():
2054 2056 if f in changedfiles:
2055 2057 fnodes[f].setdefault(n, clnode)
2056 2058 count[0] += 1
2057 2059 progress(_bundling, count[0],
2058 2060 unit=_manifests, total=count[1])
2059 2061 return clnode
2060 2062 else:
2061 2063 progress(_bundling, count[0], item=fstate[0],
2062 2064 unit=_files, total=count[1])
2063 2065 return fstate[1][x]
2064 2066
2065 2067 bundler = changegroup.bundle10(lookup)
2066 2068 reorder = self.ui.config('bundle', 'reorder', 'auto')
2067 2069 if reorder == 'auto':
2068 2070 reorder = None
2069 2071 else:
2070 2072 reorder = util.parsebool(reorder)
2071 2073
2072 2074 def gengroup():
2073 2075 # Create a changenode group generator that will call our functions
2074 2076 # back to look up the owning changenode and collect information.
2075 2077 count[:] = [0, len(csets)]
2076 2078 for chunk in cl.group(csets, bundler, reorder=reorder):
2077 2079 yield chunk
2078 2080 progress(_bundling, None)
2079 2081
2080 2082 # Create a generator for the manifestnodes that calls our lookup
2081 2083 # and data collection functions back.
2082 2084 for f in changedfiles:
2083 2085 fnodes[f] = {}
2084 2086 count[:] = [0, len(mfs)]
2085 2087 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2086 2088 yield chunk
2087 2089 progress(_bundling, None)
2088 2090
2089 2091 mfs.clear()
2090 2092
2091 2093 # Go through all our files in order sorted by name.
2092 2094 count[:] = [0, len(changedfiles)]
2093 2095 for fname in sorted(changedfiles):
2094 2096 filerevlog = self.file(fname)
2095 2097 if not len(filerevlog):
2096 2098 raise util.Abort(_("empty or missing revlog for %s")
2097 2099 % fname)
2098 2100 fstate[0] = fname
2099 2101 fstate[1] = fnodes.pop(fname, {})
2100 2102
2101 2103 nodelist = prune(filerevlog, fstate[1])
2102 2104 if nodelist:
2103 2105 count[0] += 1
2104 2106 yield bundler.fileheader(fname)
2105 2107 for chunk in filerevlog.group(nodelist, bundler, reorder):
2106 2108 yield chunk
2107 2109
2108 2110 # Signal that no more groups are left.
2109 2111 yield bundler.close()
2110 2112 progress(_bundling, None)
2111 2113
2112 2114 if csets:
2113 2115 self.hook('outgoing', node=hex(csets[0]), source=source)
2114 2116
2115 2117 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2116 2118
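The bundle.reorder knob parsed inside both changegroup builders above is effectively tri-state; a sketch of just that handling, assuming a `ui` object:

    # None means 'auto' (let the revlog decide); otherwise any boolean
    # spelling accepted by util.parsebool forces reordering on or off
    val = ui.config('bundle', 'reorder', 'auto')
    reorder = None if val == 'auto' else util.parsebool(val)
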
2117 2119 def changegroup(self, basenodes, source):
2118 2120 # to avoid a race we use changegroupsubset() (issue1320)
2119 2121 return self.changegroupsubset(basenodes, self.heads(), source)
2120 2122
2121 2123 @unfilteredmethod
2122 2124 def _changegroup(self, nodes, source):
2123 2125 """Compute the changegroup of all nodes that we have that a recipient
2124 2126 doesn't. Return a chunkbuffer object whose read() method will return
2125 2127 successive changegroup chunks.
2126 2128
2127 2129 This is much easier than the previous function as we can assume that
2128 2130 the recipient has any changenode we aren't sending them.
2129 2131
2130 2132 nodes is the set of nodes to send"""
2131 2133
2132 2134 cl = self.changelog
2133 2135 mf = self.manifest
2134 2136 mfs = {}
2135 2137 changedfiles = set()
2136 2138 fstate = ['']
2137 2139 count = [0, 0]
2138 2140
2139 2141 self.hook('preoutgoing', throw=True, source=source)
2140 2142 self.changegroupinfo(nodes, source)
2141 2143
2142 2144 revset = set([cl.rev(n) for n in nodes])
2143 2145
2144 2146 def gennodelst(log):
2145 2147 ln, llr = log.node, log.linkrev
2146 2148 return [ln(r) for r in log if llr(r) in revset]
2147 2149
2148 2150 progress = self.ui.progress
2149 2151 _bundling = _('bundling')
2150 2152 _changesets = _('changesets')
2151 2153 _manifests = _('manifests')
2152 2154 _files = _('files')
2153 2155
2154 2156 def lookup(revlog, x):
2155 2157 if revlog == cl:
2156 2158 c = cl.read(x)
2157 2159 changedfiles.update(c[3])
2158 2160 mfs.setdefault(c[0], x)
2159 2161 count[0] += 1
2160 2162 progress(_bundling, count[0],
2161 2163 unit=_changesets, total=count[1])
2162 2164 return x
2163 2165 elif revlog == mf:
2164 2166 count[0] += 1
2165 2167 progress(_bundling, count[0],
2166 2168 unit=_manifests, total=count[1])
2167 2169 return cl.node(revlog.linkrev(revlog.rev(x)))
2168 2170 else:
2169 2171 progress(_bundling, count[0], item=fstate[0],
2170 2172 total=count[1], unit=_files)
2171 2173 return cl.node(revlog.linkrev(revlog.rev(x)))
2172 2174
2173 2175 bundler = changegroup.bundle10(lookup)
2174 2176 reorder = self.ui.config('bundle', 'reorder', 'auto')
2175 2177 if reorder == 'auto':
2176 2178 reorder = None
2177 2179 else:
2178 2180 reorder = util.parsebool(reorder)
2179 2181
2180 2182 def gengroup():
2181 2183 '''yield a sequence of changegroup chunks (strings)'''
2182 2184 # construct a list of all changed files
2183 2185
2184 2186 count[:] = [0, len(nodes)]
2185 2187 for chunk in cl.group(nodes, bundler, reorder=reorder):
2186 2188 yield chunk
2187 2189 progress(_bundling, None)
2188 2190
2189 2191 count[:] = [0, len(mfs)]
2190 2192 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2191 2193 yield chunk
2192 2194 progress(_bundling, None)
2193 2195
2194 2196 count[:] = [0, len(changedfiles)]
2195 2197 for fname in sorted(changedfiles):
2196 2198 filerevlog = self.file(fname)
2197 2199 if not len(filerevlog):
2198 2200 raise util.Abort(_("empty or missing revlog for %s")
2199 2201 % fname)
2200 2202 fstate[0] = fname
2201 2203 nodelist = gennodelst(filerevlog)
2202 2204 if nodelist:
2203 2205 count[0] += 1
2204 2206 yield bundler.fileheader(fname)
2205 2207 for chunk in filerevlog.group(nodelist, bundler, reorder):
2206 2208 yield chunk
2207 2209 yield bundler.close()
2208 2210 progress(_bundling, None)
2209 2211
2210 2212 if nodes:
2211 2213 self.hook('outgoing', node=hex(nodes[0]), source=source)
2212 2214
2213 2215 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2214 2216
2215 2217 @unfilteredmethod
2216 2218 def addchangegroup(self, source, srctype, url, emptyok=False):
2217 2219 """Add the changegroup returned by source.read() to this repo.
2218 2220 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2219 2221 the URL of the repo where this changegroup is coming from.
2220 2222
2221 2223 Return an integer summarizing the change to this repo:
2222 2224 - nothing changed or no source: 0
2223 2225 - more heads than before: 1+added heads (2..n)
2224 2226 - fewer heads than before: -1-removed heads (-2..-n)
2225 2227 - number of heads stays the same: 1
2226 2228 """
2227 2229 def csmap(x):
2228 2230 self.ui.debug("add changeset %s\n" % short(x))
2229 2231 return len(cl)
2230 2232
2231 2233 def revmap(x):
2232 2234 return cl.rev(x)
2233 2235
2234 2236 if not source:
2235 2237 return 0
2236 2238
2237 2239 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2238 2240
2239 2241 changesets = files = revisions = 0
2240 2242 efiles = set()
2241 2243
2242 2244 # write changelog data to temp files so concurrent readers will not see
2243 2245 # an inconsistent view
2244 2246 cl = self.changelog
2245 2247 cl.delayupdate()
2246 2248 oldheads = cl.heads()
2247 2249
2248 2250 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2249 2251 try:
2250 2252 trp = weakref.proxy(tr)
2251 2253 # pull off the changeset group
2252 2254 self.ui.status(_("adding changesets\n"))
2253 2255 clstart = len(cl)
2254 2256 class prog(object):
2255 2257 step = _('changesets')
2256 2258 count = 1
2257 2259 ui = self.ui
2258 2260 total = None
2259 2261 def __call__(self):
2260 2262 self.ui.progress(self.step, self.count, unit=_('chunks'),
2261 2263 total=self.total)
2262 2264 self.count += 1
2263 2265 pr = prog()
2264 2266 source.callback = pr
2265 2267
2266 2268 source.changelogheader()
2267 2269 srccontent = cl.addgroup(source, csmap, trp)
2268 2270 if not (srccontent or emptyok):
2269 2271 raise util.Abort(_("received changelog group is empty"))
2270 2272 clend = len(cl)
2271 2273 changesets = clend - clstart
2272 2274 for c in xrange(clstart, clend):
2273 2275 efiles.update(self[c].files())
2274 2276 efiles = len(efiles)
2275 2277 self.ui.progress(_('changesets'), None)
2276 2278
2277 2279 # pull off the manifest group
2278 2280 self.ui.status(_("adding manifests\n"))
2279 2281 pr.step = _('manifests')
2280 2282 pr.count = 1
2281 2283 pr.total = changesets # manifests <= changesets
2282 2284 # no need to check for empty manifest group here:
2283 2285 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2284 2286 # no new manifest will be created and the manifest group will
2285 2287 # be empty during the pull
2286 2288 source.manifestheader()
2287 2289 self.manifest.addgroup(source, revmap, trp)
2288 2290 self.ui.progress(_('manifests'), None)
2289 2291
2290 2292 needfiles = {}
2291 2293 if self.ui.configbool('server', 'validate', default=False):
2292 2294 # validate incoming csets have their manifests
2293 2295 for cset in xrange(clstart, clend):
2294 2296 mfest = self.changelog.read(self.changelog.node(cset))[0]
2295 2297 mfest = self.manifest.readdelta(mfest)
2296 2298 # store file nodes we must see
2297 2299 for f, n in mfest.iteritems():
2298 2300 needfiles.setdefault(f, set()).add(n)
2299 2301
2300 2302 # process the files
2301 2303 self.ui.status(_("adding file changes\n"))
2302 2304 pr.step = _('files')
2303 2305 pr.count = 1
2304 2306 pr.total = efiles
2305 2307 source.callback = None
2306 2308
2307 2309 while True:
2308 2310 chunkdata = source.filelogheader()
2309 2311 if not chunkdata:
2310 2312 break
2311 2313 f = chunkdata["filename"]
2312 2314 self.ui.debug("adding %s revisions\n" % f)
2313 2315 pr()
2314 2316 fl = self.file(f)
2315 2317 o = len(fl)
2316 2318 if not fl.addgroup(source, revmap, trp):
2317 2319 raise util.Abort(_("received file revlog group is empty"))
2318 2320 revisions += len(fl) - o
2319 2321 files += 1
2320 2322 if f in needfiles:
2321 2323 needs = needfiles[f]
2322 2324 for new in xrange(o, len(fl)):
2323 2325 n = fl.node(new)
2324 2326 if n in needs:
2325 2327 needs.remove(n)
2326 2328 else:
2327 2329 raise util.Abort(
2328 2330 _("received spurious file revlog entry"))
2329 2331 if not needs:
2330 2332 del needfiles[f]
2331 2333 self.ui.progress(_('files'), None)
2332 2334
2333 2335 for f, needs in needfiles.iteritems():
2334 2336 fl = self.file(f)
2335 2337 for n in needs:
2336 2338 try:
2337 2339 fl.rev(n)
2338 2340 except error.LookupError:
2339 2341 raise util.Abort(
2340 2342 _('missing file data for %s:%s - run hg verify') %
2341 2343 (f, hex(n)))
2342 2344
2343 2345 dh = 0
2344 2346 if oldheads:
2345 2347 heads = cl.heads()
2346 2348 dh = len(heads) - len(oldheads)
2347 2349 for h in heads:
2348 2350 if h not in oldheads and self[h].closesbranch():
2349 2351 dh -= 1
2350 2352 htext = ""
2351 2353 if dh:
2352 2354 htext = _(" (%+d heads)") % dh
2353 2355
2354 2356 self.ui.status(_("added %d changesets"
2355 2357 " with %d changes to %d files%s\n")
2356 2358 % (changesets, revisions, files, htext))
2357 2359 self.invalidatevolatilesets()
2358 2360
2359 2361 if changesets > 0:
2360 2362 p = lambda: cl.writepending() and self.root or ""
2361 2363 self.hook('pretxnchangegroup', throw=True,
2362 2364 node=hex(cl.node(clstart)), source=srctype,
2363 2365 url=url, pending=p)
2364 2366
2365 2367 added = [cl.node(r) for r in xrange(clstart, clend)]
2366 2368 publishing = self.ui.configbool('phases', 'publish', True)
2367 2369 if srctype == 'push':
2368 2370 # Old servers cannot push the boundary themselves.
2369 2371 # New servers won't push the boundary if the changeset already
2370 2372 # existed locally as secret
2371 2373 #
2372 2374 # We should not use 'added' here but the list of all changes in
2373 2375 # the bundle
2374 2376 if publishing:
2375 2377 phases.advanceboundary(self, phases.public, srccontent)
2376 2378 else:
2377 2379 phases.advanceboundary(self, phases.draft, srccontent)
2378 2380 phases.retractboundary(self, phases.draft, added)
2379 2381 elif srctype != 'strip':
2380 2382 # publishing only alters behavior during push
2381 2383 #
2382 2384 # strip should not touch boundary at all
2383 2385 phases.retractboundary(self, phases.draft, added)
2384 2386
2385 2387 # make changelog see real files again
2386 2388 cl.finalize(trp)
2387 2389
2388 2390 tr.close()
2389 2391
2390 2392 if changesets > 0:
2391 2393 if srctype != 'strip':
2392 2394 # During strip, the branchcache is invalid, but the coming call to
2393 2395 # `destroyed` will repair it.
2394 2396 # In other cases we can safely update the cache on disk.
2395 2397 branchmap.updatecache(self.filtered('served'))
2396 2398 def runhooks():
2397 2399 # forcefully update the on-disk branch cache
2398 2400 self.ui.debug("updating the branch cache\n")
2399 2401 self.hook("changegroup", node=hex(cl.node(clstart)),
2400 2402 source=srctype, url=url)
2401 2403
2402 2404 for n in added:
2403 2405 self.hook("incoming", node=hex(n), source=srctype,
2404 2406 url=url)
2405 2407
2406 2408 newheads = [h for h in self.heads() if h not in oldheads]
2407 2409 self.ui.log("incoming",
2408 2410 "%s incoming changes - new heads: %s\n",
2409 2411 len(added),
2410 2412 ', '.join([hex(c[:6]) for c in newheads]))
2411 2413 self._afterlock(runhooks)
2412 2414
2413 2415 finally:
2414 2416 tr.release()
2415 2417 # never return 0 here:
2416 2418 if dh < 0:
2417 2419 return dh - 1
2418 2420 else:
2419 2421 return dh + 1
2420 2422
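Because addchangegroup() never returns 0 once a transaction ran, the head delta is shifted away from zero; a hedged decoder for that convention:

    def decode_head_delta(ret):
        # inverse of the 'never return 0 here' shift above
        if ret > 0:
            return ret - 1   # 0 means the head count is unchanged
        return ret + 1       # negative: heads were removed
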
2421 2423 def stream_in(self, remote, requirements):
2422 2424 lock = self.lock()
2423 2425 try:
2424 2426 # Save remote branchmap. We will use it later
2425 2427 # to speed up branchcache creation
2426 2428 rbranchmap = None
2427 2429 if remote.capable("branchmap"):
2428 2430 rbranchmap = remote.branchmap()
2429 2431
2430 2432 fp = remote.stream_out()
2431 2433 l = fp.readline()
2432 2434 try:
2433 2435 resp = int(l)
2434 2436 except ValueError:
2435 2437 raise error.ResponseError(
2436 2438 _('unexpected response from remote server:'), l)
2437 2439 if resp == 1:
2438 2440 raise util.Abort(_('operation forbidden by server'))
2439 2441 elif resp == 2:
2440 2442 raise util.Abort(_('locking the remote repository failed'))
2441 2443 elif resp != 0:
2442 2444 raise util.Abort(_('the server sent an unknown error code'))
2443 2445 self.ui.status(_('streaming all changes\n'))
2444 2446 l = fp.readline()
2445 2447 try:
2446 2448 total_files, total_bytes = map(int, l.split(' ', 1))
2447 2449 except (ValueError, TypeError):
2448 2450 raise error.ResponseError(
2449 2451 _('unexpected response from remote server:'), l)
2450 2452 self.ui.status(_('%d files to transfer, %s of data\n') %
2451 2453 (total_files, util.bytecount(total_bytes)))
2452 2454 handled_bytes = 0
2453 2455 self.ui.progress(_('clone'), 0, total=total_bytes)
2454 2456 start = time.time()
2455 2457 for i in xrange(total_files):
2456 2458 # XXX doesn't support '\n' or '\r' in filenames
2457 2459 l = fp.readline()
2458 2460 try:
2459 2461 name, size = l.split('\0', 1)
2460 2462 size = int(size)
2461 2463 except (ValueError, TypeError):
2462 2464 raise error.ResponseError(
2463 2465 _('unexpected response from remote server:'), l)
2464 2466 if self.ui.debugflag:
2465 2467 self.ui.debug('adding %s (%s)\n' %
2466 2468 (name, util.bytecount(size)))
2467 2469 # for backwards compat, name was partially encoded
2468 2470 ofp = self.sopener(store.decodedir(name), 'w')
2469 2471 for chunk in util.filechunkiter(fp, limit=size):
2470 2472 handled_bytes += len(chunk)
2471 2473 self.ui.progress(_('clone'), handled_bytes,
2472 2474 total=total_bytes)
2473 2475 ofp.write(chunk)
2474 2476 ofp.close()
2475 2477 elapsed = time.time() - start
2476 2478 if elapsed <= 0:
2477 2479 elapsed = 0.001
2478 2480 self.ui.progress(_('clone'), None)
2479 2481 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2480 2482 (util.bytecount(total_bytes), elapsed,
2481 2483 util.bytecount(total_bytes / elapsed)))
2482 2484
2483 2485 # new requirements = old non-format requirements +
2484 2486 #                    new format-related requirements
2485 2487 #                    from the streamed-in repository
2486 2488 requirements.update(set(self.requirements) - self.supportedformats)
2487 2489 self._applyrequirements(requirements)
2488 2490 self._writerequirements()
2489 2491
2490 2492 if rbranchmap:
2491 2493 rbheads = []
2492 2494 for bheads in rbranchmap.itervalues():
2493 2495 rbheads.extend(bheads)
2494 2496
2495 2497 if rbheads:
2496 2498 rtiprev = max((int(self.changelog.rev(node))
2497 2499 for node in rbheads))
2498 2500 cache = branchmap.branchcache(rbranchmap,
2499 2501 self[rtiprev].node(),
2500 2502 rtiprev)
2501 2503 # Try to stick it as low as possible;
2502 2504 # filters above 'served' are unlikely to be fetched from a clone
2503 2505 for candidate in ('base', 'immutable', 'served'):
2504 2506 rview = self.filtered(candidate)
2505 2507 if cache.validfor(rview):
2506 2508 self._branchcaches[candidate] = cache
2507 2509 cache.write(rview)
2508 2510 break
2509 2511 self.invalidate()
2510 2512 return len(self.heads()) + 1
2511 2513 finally:
2512 2514 lock.release()
2513 2515
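The stream_out wire format consumed above is line oriented: a status code, then '<total_files> <total_bytes>', then for each file '<name>\0<size>' followed by exactly size raw bytes. A minimal standalone parser under those assumptions (no progress reporting, no name decoding):

    def parse_stream(fp):
        # status line: 0 = ok, 1 = forbidden, 2 = remote lock failed
        if int(fp.readline()) != 0:
            raise ValueError('server refused to stream')
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for _i in xrange(total_files):
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))
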
2514 2516 def clone(self, remote, heads=[], stream=False):
2515 2517 '''clone remote repository.
2516 2518
2517 2519 keyword arguments:
2518 2520 heads: list of revs to clone (forces use of pull)
2519 2521 stream: use streaming clone if possible'''
2520 2522
2521 2523 # now, all clients that can request uncompressed clones can
2522 2524 # read repo formats supported by all servers that can serve
2523 2525 # them.
2524 2526
2525 2527 # if revlog format changes, client will have to check version
2526 2528 # and format flags on "stream" capability, and use
2527 2529 # uncompressed only if compatible.
2528 2530
2529 2531 if not stream:
2530 2532 # if the server explicitly prefers to stream (for fast LANs)
2531 2533 stream = remote.capable('stream-preferred')
2532 2534
2533 2535 if stream and not heads:
2534 2536 # 'stream' means remote revlog format is revlogv1 only
2535 2537 if remote.capable('stream'):
2536 2538 return self.stream_in(remote, set(('revlogv1',)))
2537 2539 # otherwise, 'streamreqs' contains the remote revlog format
2538 2540 streamreqs = remote.capable('streamreqs')
2539 2541 if streamreqs:
2540 2542 streamreqs = set(streamreqs.split(','))
2541 2543 # if we support it, stream in and adjust our requirements
2542 2544 if not streamreqs - self.supportedformats:
2543 2545 return self.stream_in(remote, streamreqs)
2544 2546 return self.pull(remote, heads)
2545 2547
2546 2548 def pushkey(self, namespace, key, old, new):
2547 2549 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2548 2550 old=old, new=new)
2549 2551 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2550 2552 ret = pushkey.push(self, namespace, key, old, new)
2551 2553 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2552 2554 ret=ret)
2553 2555 return ret
2554 2556
2555 2557 def listkeys(self, namespace):
2556 2558 self.hook('prelistkeys', throw=True, namespace=namespace)
2557 2559 self.ui.debug('listing keys for "%s"\n' % namespace)
2558 2560 values = pushkey.list(self, namespace)
2559 2561 self.hook('listkeys', namespace=namespace, values=values)
2560 2562 return values
2561 2563
2562 2564 def debugwireargs(self, one, two, three=None, four=None, five=None):
2563 2565 '''used to test argument passing over the wire'''
2564 2566 return "%s %s %s %s %s" % (one, two, three, four, five)
2565 2567
2566 2568 def savecommitmessage(self, text):
2567 2569 fp = self.opener('last-message.txt', 'wb')
2568 2570 try:
2569 2571 fp.write(text)
2570 2572 finally:
2571 2573 fp.close()
2572 2574 return self.pathto(fp.name[len(self.root) + 1:])
2573 2575
2574 2576 # used to avoid circular references so destructors work
2575 2577 def aftertrans(files):
2576 2578 renamefiles = [tuple(t) for t in files]
2577 2579 def a():
2578 for src, dest in renamefiles:
2580 for vfs, src, dest in renamefiles:
2579 2581 try:
2580 util.rename(src, dest)
2582 vfs.rename(src, dest)
2581 2583 except OSError: # journal file does not yet exist
2582 2584 pass
2583 2585 return a
2584 2586
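With this change, callers of aftertrans() queue (vfs, src, dest) triples instead of bare path pairs, so journal files living under different roots can share one callback. A sketch of the new calling convention; the repo.vfs/repo.svfs handles for '.hg' and '.hg/store' are assumptions for illustration:

    renames = [(repo.vfs, 'journal.dirstate', 'undo.dirstate'),
               (repo.svfs, 'journal', 'undo')]
    onclose = aftertrans(renames)
    onclose()   # performs vfs.rename(src, dest) for each triple
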
2585 2587 def undoname(fn):
2586 2588 base, name = os.path.split(fn)
2587 2589 assert name.startswith('journal')
2588 2590 return os.path.join(base, name.replace('journal', 'undo', 1))
2589 2591
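undoname() only swaps the first 'journal' component of the basename, e.g.:

    >>> undoname('/repo/.hg/store/journal')
    '/repo/.hg/store/undo'
    >>> undoname('/repo/.hg/journal.branch')
    '/repo/.hg/undo.branch'
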
2590 2592 def instance(ui, path, create):
2591 2593 return localrepository(ui, util.urllocalpath(path), create)
2592 2594
2593 2595 def islocal(path):
2594 2596 return True