clfilter: ensure changeset creation in the repo is run unfiltered...
Pierre-Yves David
r18000:f9459bcd default
@@ -1,2651 +1,2654 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 def unfilteredmeth(orig):
27 27 """decorate method that always need to be run on unfiltered version"""
28 28 def wrapper(repo, *args, **kwargs):
29 29 return orig(repo.unfiltered(), *args, **kwargs)
30 30 return wrapper
31 31
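# --- Editorial sketch, not part of the original changeset ---
# Minimal illustration of how unfilteredmeth is meant to be used: a method
# decorated with it always runs against the unfiltered repository, so
# changesets hidden by a repo filter stay visible to that code. The class
# and attribute names below are hypothetical.
class _exampleunfilteredrepo(object):
    filtered = True

    def unfiltered(self):
        # a real repository returns its unfiltered variant here
        unfiltered = _exampleunfilteredrepo()
        unfiltered.filtered = False
        return unfiltered

    @unfilteredmeth
    def isfiltered(self):
        # inside a decorated method, 'self' is always the unfiltered object
        return self.filtered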
32 32 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
33 33 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
34 34
35 35 class localpeer(peer.peerrepository):
36 36 '''peer for a local repo; reflects only the most recent API'''
37 37
38 38 def __init__(self, repo, caps=MODERNCAPS):
39 39 peer.peerrepository.__init__(self)
40 40 self._repo = repo
41 41 self.ui = repo.ui
42 42 self._caps = repo._restrictcapabilities(caps)
43 43 self.requirements = repo.requirements
44 44 self.supportedformats = repo.supportedformats
45 45
46 46 def close(self):
47 47 self._repo.close()
48 48
49 49 def _capabilities(self):
50 50 return self._caps
51 51
52 52 def local(self):
53 53 return self._repo
54 54
55 55 def canpush(self):
56 56 return True
57 57
58 58 def url(self):
59 59 return self._repo.url()
60 60
61 61 def lookup(self, key):
62 62 return self._repo.lookup(key)
63 63
64 64 def branchmap(self):
65 65 return discovery.visiblebranchmap(self._repo)
66 66
67 67 def heads(self):
68 68 return discovery.visibleheads(self._repo)
69 69
70 70 def known(self, nodes):
71 71 return self._repo.known(nodes)
72 72
73 73 def getbundle(self, source, heads=None, common=None):
74 74 return self._repo.getbundle(source, heads=heads, common=common)
75 75
76 76 # TODO We might want to move the next two calls into legacypeer and add
77 77 # unbundle instead.
78 78
79 79 def lock(self):
80 80 return self._repo.lock()
81 81
82 82 def addchangegroup(self, cg, source, url):
83 83 return self._repo.addchangegroup(cg, source, url)
84 84
85 85 def pushkey(self, namespace, key, old, new):
86 86 return self._repo.pushkey(namespace, key, old, new)
87 87
88 88 def listkeys(self, namespace):
89 89 return self._repo.listkeys(namespace)
90 90
91 91 def debugwireargs(self, one, two, three=None, four=None, five=None):
92 92 '''used to test argument passing over the wire'''
93 93 return "%s %s %s %s %s" % (one, two, three, four, five)
94 94
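# --- Editorial sketch, not part of the original changeset ---
# How a local peer is obtained and queried; 'somerepo' stands for an
# already constructed localrepository instance (hypothetical placeholder).
def _examplelocalpeeruse(somerepo):
    peer = somerepo.peer()    # localpeer wrapping the repository
    heads = peer.heads()      # only heads visible through discovery
    known = peer.known(heads) # list of booleans, one per node
    return heads, known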
95 95 class locallegacypeer(localpeer):
96 96 '''peer extension which implements legacy methods too; used for tests with
97 97 restricted capabilities'''
98 98
99 99 def __init__(self, repo):
100 100 localpeer.__init__(self, repo, caps=LEGACYCAPS)
101 101
102 102 def branches(self, nodes):
103 103 return self._repo.branches(nodes)
104 104
105 105 def between(self, pairs):
106 106 return self._repo.between(pairs)
107 107
108 108 def changegroup(self, basenodes, source):
109 109 return self._repo.changegroup(basenodes, source)
110 110
111 111 def changegroupsubset(self, bases, heads, source):
112 112 return self._repo.changegroupsubset(bases, heads, source)
113 113
114 114 class localrepository(object):
115 115
116 116 supportedformats = set(('revlogv1', 'generaldelta'))
117 117 supported = supportedformats | set(('store', 'fncache', 'shared',
118 118 'dotencode'))
119 119 openerreqs = set(('revlogv1', 'generaldelta'))
120 120 requirements = ['revlogv1']
121 121
122 122 def _baserequirements(self, create):
123 123 return self.requirements[:]
124 124
125 125 def __init__(self, baseui, path=None, create=False):
126 126 self.wvfs = scmutil.vfs(path, expand=True)
127 127 self.wopener = self.wvfs
128 128 self.root = self.wvfs.base
129 129 self.path = self.wvfs.join(".hg")
130 130 self.origroot = path
131 131 self.auditor = scmutil.pathauditor(self.root, self._checknested)
132 132 self.vfs = scmutil.vfs(self.path)
133 133 self.opener = self.vfs
134 134 self.baseui = baseui
135 135 self.ui = baseui.copy()
136 136 # A list of callbacks to shape the phase if no data were found.
137 137 # Callbacks are in the form: func(repo, roots) --> processed root.
138 138 # This list is to be filled by extensions during repo setup.
139 139 self._phasedefaults = []
140 140 try:
141 141 self.ui.readconfig(self.join("hgrc"), self.root)
142 142 extensions.loadall(self.ui)
143 143 except IOError:
144 144 pass
145 145
146 146 if not self.vfs.isdir():
147 147 if create:
148 148 if not self.wvfs.exists():
149 149 self.wvfs.makedirs()
150 150 self.vfs.makedir(notindexed=True)
151 151 requirements = self._baserequirements(create)
152 152 if self.ui.configbool('format', 'usestore', True):
153 153 self.vfs.mkdir("store")
154 154 requirements.append("store")
155 155 if self.ui.configbool('format', 'usefncache', True):
156 156 requirements.append("fncache")
157 157 if self.ui.configbool('format', 'dotencode', True):
158 158 requirements.append('dotencode')
159 159 # create an invalid changelog
160 160 self.vfs.append(
161 161 "00changelog.i",
162 162 '\0\0\0\2' # represents revlogv2
163 163 ' dummy changelog to prevent using the old repo layout'
164 164 )
165 165 if self.ui.configbool('format', 'generaldelta', False):
166 166 requirements.append("generaldelta")
167 167 requirements = set(requirements)
168 168 else:
169 169 raise error.RepoError(_("repository %s not found") % path)
170 170 elif create:
171 171 raise error.RepoError(_("repository %s already exists") % path)
172 172 else:
173 173 try:
174 174 requirements = scmutil.readrequires(self.vfs, self.supported)
175 175 except IOError, inst:
176 176 if inst.errno != errno.ENOENT:
177 177 raise
178 178 requirements = set()
179 179
180 180 self.sharedpath = self.path
181 181 try:
182 182 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
183 183 if not os.path.exists(s):
184 184 raise error.RepoError(
185 185 _('.hg/sharedpath points to nonexistent directory %s') % s)
186 186 self.sharedpath = s
187 187 except IOError, inst:
188 188 if inst.errno != errno.ENOENT:
189 189 raise
190 190
191 191 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
192 192 self.spath = self.store.path
193 193 self.svfs = self.store.vfs
194 194 self.sopener = self.svfs
195 195 self.sjoin = self.store.join
196 196 self.vfs.createmode = self.store.createmode
197 197 self._applyrequirements(requirements)
198 198 if create:
199 199 self._writerequirements()
200 200
201 201
202 202 self._branchcache = None
203 203 self._branchcachetip = None
204 204 self.filterpats = {}
205 205 self._datafilters = {}
206 206 self._transref = self._lockref = self._wlockref = None
207 207
208 208 # A cache for various files under .hg/ that tracks file changes,
209 209 # (used by the filecache decorator)
210 210 #
211 211 # Maps a property name to its util.filecacheentry
212 212 self._filecache = {}
213 213
214 214 def close(self):
215 215 pass
216 216
217 217 def _restrictcapabilities(self, caps):
218 218 return caps
219 219
220 220 def _applyrequirements(self, requirements):
221 221 self.requirements = requirements
222 222 self.sopener.options = dict((r, 1) for r in requirements
223 223 if r in self.openerreqs)
224 224
225 225 def _writerequirements(self):
226 226 reqfile = self.opener("requires", "w")
227 227 for r in self.requirements:
228 228 reqfile.write("%s\n" % r)
229 229 reqfile.close()
230 230
231 231 def _checknested(self, path):
232 232 """Determine if path is a legal nested repository."""
233 233 if not path.startswith(self.root):
234 234 return False
235 235 subpath = path[len(self.root) + 1:]
236 236 normsubpath = util.pconvert(subpath)
237 237
238 238 # XXX: Checking against the current working copy is wrong in
239 239 # the sense that it can reject things like
240 240 #
241 241 # $ hg cat -r 10 sub/x.txt
242 242 #
243 243 # if sub/ is no longer a subrepository in the working copy
244 244 # parent revision.
245 245 #
246 246 # However, it can of course also allow things that would have
247 247 # been rejected before, such as the above cat command if sub/
248 248 # is a subrepository now, but was a normal directory before.
249 249 # The old path auditor would have rejected by mistake since it
250 250 # panics when it sees sub/.hg/.
251 251 #
252 252 # All in all, checking against the working copy seems sensible
253 253 # since we want to prevent access to nested repositories on
254 254 # the filesystem *now*.
255 255 ctx = self[None]
256 256 parts = util.splitpath(subpath)
257 257 while parts:
258 258 prefix = '/'.join(parts)
259 259 if prefix in ctx.substate:
260 260 if prefix == normsubpath:
261 261 return True
262 262 else:
263 263 sub = ctx.sub(prefix)
264 264 return sub.checknested(subpath[len(prefix) + 1:])
265 265 else:
266 266 parts.pop()
267 267 return False
268 268
269 269 def peer(self):
270 270 return localpeer(self) # not cached to avoid reference cycle
271 271
272 272 def unfiltered(self):
273 273 """Return unfiltered version of the repository
274 274
275 275 Intended to be overwritten by filtered repo."""
276 276 return self
277 277
278 278 @filecache('bookmarks')
279 279 def _bookmarks(self):
280 280 return bookmarks.bmstore(self)
281 281
282 282 @filecache('bookmarks.current')
283 283 def _bookmarkcurrent(self):
284 284 return bookmarks.readcurrent(self)
285 285
286 286 def bookmarkheads(self, bookmark):
287 287 name = bookmark.split('@', 1)[0]
288 288 heads = []
289 289 for mark, n in self._bookmarks.iteritems():
290 290 if mark.split('@', 1)[0] == name:
291 291 heads.append(n)
292 292 return heads
293 293
294 294 @storecache('phaseroots')
295 295 def _phasecache(self):
296 296 return phases.phasecache(self, self._phasedefaults)
297 297
298 298 @storecache('obsstore')
299 299 def obsstore(self):
300 300 store = obsolete.obsstore(self.sopener)
301 301 if store and not obsolete._enabled:
302 302 # message is rare enough to not be translated
303 303 msg = 'obsolete feature not enabled but %i markers found!\n'
304 304 self.ui.warn(msg % len(list(store)))
305 305 return store
306 306
307 307 @propertycache
308 308 def hiddenrevs(self):
309 309 """hiddenrevs: revs that should be hidden by command and tools
310 310
311 311 This set is carried on the repo to ease initialization and lazy
312 312 loading; it'll probably move back to changelog for efficiency and
313 313 consistency reasons.
314 314
315 315 Note that hiddenrevs will need invalidation when
316 316 - a new changeset is added (possibly unstable above extinct)
317 317 - a new obsolete marker is added (possibly a new extinct changeset)
318 318
319 319 hidden changesets cannot have non-hidden descendants
320 320 """
321 321 hidden = set()
322 322 if self.obsstore:
323 323 ### hide extinct changesets that are not accessible by any means
324 324 hiddenquery = 'extinct() - ::(. + bookmark())'
325 325 hidden.update(self.revs(hiddenquery))
326 326 return hidden
327 327
328 328 @storecache('00changelog.i')
329 329 def changelog(self):
330 330 c = changelog.changelog(self.sopener)
331 331 if 'HG_PENDING' in os.environ:
332 332 p = os.environ['HG_PENDING']
333 333 if p.startswith(self.root):
334 334 c.readpending('00changelog.i.a')
335 335 return c
336 336
337 337 @storecache('00manifest.i')
338 338 def manifest(self):
339 339 return manifest.manifest(self.sopener)
340 340
341 341 @filecache('dirstate')
342 342 def dirstate(self):
343 343 warned = [0]
344 344 def validate(node):
345 345 try:
346 346 self.changelog.rev(node)
347 347 return node
348 348 except error.LookupError:
349 349 if not warned[0]:
350 350 warned[0] = True
351 351 self.ui.warn(_("warning: ignoring unknown"
352 352 " working parent %s!\n") % short(node))
353 353 return nullid
354 354
355 355 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
356 356
357 357 def __getitem__(self, changeid):
358 358 if changeid is None:
359 359 return context.workingctx(self)
360 360 return context.changectx(self, changeid)
361 361
362 362 def __contains__(self, changeid):
363 363 try:
364 364 return bool(self.lookup(changeid))
365 365 except error.RepoLookupError:
366 366 return False
367 367
368 368 def __nonzero__(self):
369 369 return True
370 370
371 371 def __len__(self):
372 372 return len(self.changelog)
373 373
374 374 def __iter__(self):
375 375 return iter(self.changelog)
376 376
377 377 def revs(self, expr, *args):
378 378 '''Return a list of revisions matching the given revset'''
379 379 expr = revset.formatspec(expr, *args)
380 380 m = revset.match(None, expr)
381 381 return [r for r in m(self, list(self))]
382 382
383 383 def set(self, expr, *args):
384 384 '''
385 385 Yield a context for each matching revision, after doing arg
386 386 replacement via revset.formatspec
387 387 '''
388 388 for r in self.revs(expr, *args):
389 389 yield self[r]
390 390
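# --- Editorial sketch, not part of the original changeset ---
# Typical use of revs() and set(): both expand a revset template through
# revset.formatspec before matching. 'somerepo' is a hypothetical
# localrepository instance.
def _examplerevsetuse(somerepo):
    # revision numbers on a given branch
    revnums = somerepo.revs('branch(%s)', 'default')
    # the same query, but yielding changectx objects one by one
    descriptions = [ctx.description() for ctx in
                    somerepo.set('branch(%s)', 'default')]
    return revnums, descriptions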
391 391 def url(self):
392 392 return 'file:' + self.root
393 393
394 394 def hook(self, name, throw=False, **args):
395 395 return hook.hook(self.ui, self, name, throw, **args)
396 396
397 397 @unfilteredmeth
398 398 def _tag(self, names, node, message, local, user, date, extra={}):
399 399 if isinstance(names, str):
400 400 names = (names,)
401 401
402 402 branches = self.branchmap()
403 403 for name in names:
404 404 self.hook('pretag', throw=True, node=hex(node), tag=name,
405 405 local=local)
406 406 if name in branches:
407 407 self.ui.warn(_("warning: tag %s conflicts with existing"
408 408 " branch name\n") % name)
409 409
410 410 def writetags(fp, names, munge, prevtags):
411 411 fp.seek(0, 2)
412 412 if prevtags and prevtags[-1] != '\n':
413 413 fp.write('\n')
414 414 for name in names:
415 415 m = munge and munge(name) or name
416 416 if (self._tagscache.tagtypes and
417 417 name in self._tagscache.tagtypes):
418 418 old = self.tags().get(name, nullid)
419 419 fp.write('%s %s\n' % (hex(old), m))
420 420 fp.write('%s %s\n' % (hex(node), m))
421 421 fp.close()
422 422
423 423 prevtags = ''
424 424 if local:
425 425 try:
426 426 fp = self.opener('localtags', 'r+')
427 427 except IOError:
428 428 fp = self.opener('localtags', 'a')
429 429 else:
430 430 prevtags = fp.read()
431 431
432 432 # local tags are stored in the current charset
433 433 writetags(fp, names, None, prevtags)
434 434 for name in names:
435 435 self.hook('tag', node=hex(node), tag=name, local=local)
436 436 return
437 437
438 438 try:
439 439 fp = self.wfile('.hgtags', 'rb+')
440 440 except IOError, e:
441 441 if e.errno != errno.ENOENT:
442 442 raise
443 443 fp = self.wfile('.hgtags', 'ab')
444 444 else:
445 445 prevtags = fp.read()
446 446
447 447 # committed tags are stored in UTF-8
448 448 writetags(fp, names, encoding.fromlocal, prevtags)
449 449
450 450 fp.close()
451 451
452 452 self.invalidatecaches()
453 453
454 454 if '.hgtags' not in self.dirstate:
455 455 self[None].add(['.hgtags'])
456 456
457 457 m = matchmod.exact(self.root, '', ['.hgtags'])
458 458 tagnode = self.commit(message, user, date, extra=extra, match=m)
459 459
460 460 for name in names:
461 461 self.hook('tag', node=hex(node), tag=name, local=local)
462 462
463 463 return tagnode
464 464
465 465 def tag(self, names, node, message, local, user, date):
466 466 '''tag a revision with one or more symbolic names.
467 467
468 468 names is a list of strings or, when adding a single tag, names may be a
469 469 string.
470 470
471 471 if local is True, the tags are stored in a per-repository file.
472 472 otherwise, they are stored in the .hgtags file, and a new
473 473 changeset is committed with the change.
474 474
475 475 keyword arguments:
476 476
477 477 local: whether to store tags in non-version-controlled file
478 478 (default False)
479 479
480 480 message: commit message to use if committing
481 481
482 482 user: name of user to use if committing
483 483
484 484 date: date tuple to use if committing'''
485 485
486 486 if not local:
487 487 for x in self.status()[:5]:
488 488 if '.hgtags' in x:
489 489 raise util.Abort(_('working copy of .hgtags is changed '
490 490 '(please commit .hgtags manually)'))
491 491
492 492 self.tags() # instantiate the cache
493 493 self._tag(names, node, message, local, user, date)
494 494
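# --- Editorial sketch, not part of the original changeset ---
# Calling tag() as described in the docstring above: a global tag commits a
# change to .hgtags, a local tag only touches .hg/localtags. The repo, node
# and user arguments are hypothetical placeholders.
def _exampletaguse(somerepo, somenode, someuser):
    # global tag: creates a new changeset updating .hgtags
    somerepo.tag('v1.0', somenode, 'Added tag v1.0', False, someuser, None)
    # local tag: stored in .hg/localtags, no changeset is created
    somerepo.tag('wip', somenode, '', True, someuser, None)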
495 495 @propertycache
496 496 def _tagscache(self):
497 497 '''Returns a tagscache object that contains various tag-related
498 498 caches.'''
499 499
500 500 # This simplifies its cache management by having one decorated
501 501 # function (this one) and the rest simply fetch things from it.
502 502 class tagscache(object):
503 503 def __init__(self):
504 504 # These two define the set of tags for this repository. tags
505 505 # maps tag name to node; tagtypes maps tag name to 'global' or
506 506 # 'local'. (Global tags are defined by .hgtags across all
507 507 # heads, and local tags are defined in .hg/localtags.)
508 508 # They constitute the in-memory cache of tags.
509 509 self.tags = self.tagtypes = None
510 510
511 511 self.nodetagscache = self.tagslist = None
512 512
513 513 cache = tagscache()
514 514 cache.tags, cache.tagtypes = self._findtags()
515 515
516 516 return cache
517 517
518 518 def tags(self):
519 519 '''return a mapping of tag to node'''
520 520 t = {}
521 521 if self.changelog.filteredrevs:
522 522 tags, tt = self._findtags()
523 523 else:
524 524 tags = self._tagscache.tags
525 525 for k, v in tags.iteritems():
526 526 try:
527 527 # ignore tags to unknown nodes
528 528 self.changelog.rev(v)
529 529 t[k] = v
530 530 except (error.LookupError, ValueError):
531 531 pass
532 532 return t
533 533
534 534 def _findtags(self):
535 535 '''Do the hard work of finding tags. Return a pair of dicts
536 536 (tags, tagtypes) where tags maps tag name to node, and tagtypes
537 537 maps tag name to a string like \'global\' or \'local\'.
538 538 Subclasses or extensions are free to add their own tags, but
539 539 should be aware that the returned dicts will be retained for the
540 540 duration of the localrepo object.'''
541 541
542 542 # XXX what tagtype should subclasses/extensions use? Currently
543 543 # mq and bookmarks add tags, but do not set the tagtype at all.
544 544 # Should each extension invent its own tag type? Should there
545 545 # be one tagtype for all such "virtual" tags? Or is the status
546 546 # quo fine?
547 547
548 548 alltags = {} # map tag name to (node, hist)
549 549 tagtypes = {}
550 550
551 551 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
552 552 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
553 553
554 554 # Build the return dicts. Have to re-encode tag names because
555 555 # the tags module always uses UTF-8 (in order not to lose info
556 556 # writing to the cache), but the rest of Mercurial wants them in
557 557 # local encoding.
558 558 tags = {}
559 559 for (name, (node, hist)) in alltags.iteritems():
560 560 if node != nullid:
561 561 tags[encoding.tolocal(name)] = node
562 562 tags['tip'] = self.changelog.tip()
563 563 tagtypes = dict([(encoding.tolocal(name), value)
564 564 for (name, value) in tagtypes.iteritems()])
565 565 return (tags, tagtypes)
566 566
567 567 def tagtype(self, tagname):
568 568 '''
569 569 return the type of the given tag. result can be:
570 570
571 571 'local' : a local tag
572 572 'global' : a global tag
573 573 None : tag does not exist
574 574 '''
575 575
576 576 return self._tagscache.tagtypes.get(tagname)
577 577
578 578 def tagslist(self):
579 579 '''return a list of tags ordered by revision'''
580 580 if not self._tagscache.tagslist:
581 581 l = []
582 582 for t, n in self.tags().iteritems():
583 583 r = self.changelog.rev(n)
584 584 l.append((r, t, n))
585 585 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
586 586
587 587 return self._tagscache.tagslist
588 588
589 589 def nodetags(self, node):
590 590 '''return the tags associated with a node'''
591 591 if not self._tagscache.nodetagscache:
592 592 nodetagscache = {}
593 593 for t, n in self._tagscache.tags.iteritems():
594 594 nodetagscache.setdefault(n, []).append(t)
595 595 for tags in nodetagscache.itervalues():
596 596 tags.sort()
597 597 self._tagscache.nodetagscache = nodetagscache
598 598 return self._tagscache.nodetagscache.get(node, [])
599 599
600 600 def nodebookmarks(self, node):
601 601 marks = []
602 602 for bookmark, n in self._bookmarks.iteritems():
603 603 if n == node:
604 604 marks.append(bookmark)
605 605 return sorted(marks)
606 606
607 607 def _branchtags(self, partial, lrev):
608 608 # TODO: rename this function?
609 609 tiprev = len(self) - 1
610 610 if lrev != tiprev:
611 611 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
612 612 self._updatebranchcache(partial, ctxgen)
613 613 self._writebranchcache(partial, self.changelog.tip(), tiprev)
614 614
615 615 return partial
616 616
617 617 @unfilteredmeth # Until we get a smarter cache management
618 618 def updatebranchcache(self):
619 619 tip = self.changelog.tip()
620 620 if self._branchcache is not None and self._branchcachetip == tip:
621 621 return
622 622
623 623 oldtip = self._branchcachetip
624 624 self._branchcachetip = tip
625 625 if oldtip is None or oldtip not in self.changelog.nodemap:
626 626 partial, last, lrev = self._readbranchcache()
627 627 else:
628 628 lrev = self.changelog.rev(oldtip)
629 629 partial = self._branchcache
630 630
631 631 self._branchtags(partial, lrev)
632 632 # this private cache holds all heads (not just the branch tips)
633 633 self._branchcache = partial
634 634
635 635 def branchmap(self):
636 636 '''returns a dictionary {branch: [branchheads]}'''
637 637 if self.changelog.filteredrevs:
638 638 # some changesets are excluded, so we can't use the cache
639 639 branchmap = {}
640 640 self._updatebranchcache(branchmap, (self[r] for r in self))
641 641 return branchmap
642 642 else:
643 643 self.updatebranchcache()
644 644 return self._branchcache
645 645
646 646
647 647 def _branchtip(self, heads):
648 648 '''return the tipmost branch head in heads'''
649 649 tip = heads[-1]
650 650 for h in reversed(heads):
651 651 if not self[h].closesbranch():
652 652 tip = h
653 653 break
654 654 return tip
655 655
656 656 def branchtip(self, branch):
657 657 '''return the tip node for a given branch'''
658 658 if branch not in self.branchmap():
659 659 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
660 660 return self._branchtip(self.branchmap()[branch])
661 661
662 662 def branchtags(self):
663 663 '''return a dict where branch names map to the tipmost head of
664 664 the branch, open heads come before closed'''
665 665 bt = {}
666 666 for bn, heads in self.branchmap().iteritems():
667 667 bt[bn] = self._branchtip(heads)
668 668 return bt
669 669
670 670 @unfilteredmeth # Until we get a smarter cache management
671 671 def _readbranchcache(self):
672 672 partial = {}
673 673 try:
674 674 f = self.opener("cache/branchheads")
675 675 lines = f.read().split('\n')
676 676 f.close()
677 677 except (IOError, OSError):
678 678 return {}, nullid, nullrev
679 679
680 680 try:
681 681 last, lrev = lines.pop(0).split(" ", 1)
682 682 last, lrev = bin(last), int(lrev)
683 683 if lrev >= len(self) or self[lrev].node() != last:
684 684 # invalidate the cache
685 685 raise ValueError('invalidating branch cache (tip differs)')
686 686 for l in lines:
687 687 if not l:
688 688 continue
689 689 node, label = l.split(" ", 1)
690 690 label = encoding.tolocal(label.strip())
691 691 if not node in self:
692 692 raise ValueError('invalidating branch cache because node '+
693 693 '%s does not exist' % node)
694 694 partial.setdefault(label, []).append(bin(node))
695 695 except KeyboardInterrupt:
696 696 raise
697 697 except Exception, inst:
698 698 if self.ui.debugflag:
699 699 self.ui.warn(str(inst), '\n')
700 700 partial, last, lrev = {}, nullid, nullrev
701 701 return partial, last, lrev
702 702
703 703 @unfilteredmeth # Until we get a smarter cache management
704 704 def _writebranchcache(self, branches, tip, tiprev):
705 705 try:
706 706 f = self.opener("cache/branchheads", "w", atomictemp=True)
707 707 f.write("%s %s\n" % (hex(tip), tiprev))
708 708 for label, nodes in branches.iteritems():
709 709 for node in nodes:
710 710 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
711 711 f.close()
712 712 except (IOError, OSError):
713 713 pass
714 714
715 715 @unfilteredmeth # Until we get a smarter cache management
716 716 def _updatebranchcache(self, partial, ctxgen):
717 717 """Given a branchhead cache, partial, that may have extra nodes or be
718 718 missing heads, and a generator of nodes that are at least a superset of
719 719 heads missing, this function updates partial to be correct.
720 720 """
721 721 # collect new branch entries
722 722 newbranches = {}
723 723 for c in ctxgen:
724 724 newbranches.setdefault(c.branch(), []).append(c.node())
725 725 # if older branchheads are reachable from new ones, they aren't
726 726 # really branchheads. Note checking parents is insufficient:
727 727 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
728 728 for branch, newnodes in newbranches.iteritems():
729 729 bheads = partial.setdefault(branch, [])
730 730 # Remove candidate heads that no longer are in the repo (e.g., as
731 731 # the result of a strip that just happened). Avoid using 'node in
732 732 # self' here because that dives down into branchcache code somewhat
733 733 # recursively.
734 734 bheadrevs = [self.changelog.rev(node) for node in bheads
735 735 if self.changelog.hasnode(node)]
736 736 newheadrevs = [self.changelog.rev(node) for node in newnodes
737 737 if self.changelog.hasnode(node)]
738 738 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
739 739 # Remove duplicates - nodes that are in newheadrevs and are already
740 740 # in bheadrevs. This can happen if you strip a node whose parent
741 741 # was already a head (because they're on different branches).
742 742 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
743 743
744 744 # Starting from tip means fewer passes over reachable. If we know
745 745 # the new candidates are not ancestors of existing heads, we don't
746 746 # have to examine ancestors of existing heads
747 747 if ctxisnew:
748 748 iterrevs = sorted(newheadrevs)
749 749 else:
750 750 iterrevs = list(bheadrevs)
751 751
752 752 # This loop prunes out two kinds of heads - heads that are
753 753 # superseded by a head in newheadrevs, and newheadrevs that are not
754 754 # heads because an existing head is their descendant.
755 755 while iterrevs:
756 756 latest = iterrevs.pop()
757 757 if latest not in bheadrevs:
758 758 continue
759 759 ancestors = set(self.changelog.ancestors([latest],
760 760 bheadrevs[0]))
761 761 if ancestors:
762 762 bheadrevs = [b for b in bheadrevs if b not in ancestors]
763 763 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
764 764
765 765 # There may be branches that cease to exist when the last commit in the
766 766 # branch was stripped. This code filters them out. Note that the
767 767 # branch that ceased to exist may not be in newbranches because
768 768 # newbranches is the set of candidate heads, which when you strip the
769 769 # last commit in a branch will be the parent branch.
770 770 for branch in partial.keys():
771 771 nodes = [head for head in partial[branch]
772 772 if self.changelog.hasnode(head)]
773 773 if not nodes:
774 774 del partial[branch]
775 775
776 776 def lookup(self, key):
777 777 return self[key].node()
778 778
779 779 def lookupbranch(self, key, remote=None):
780 780 repo = remote or self
781 781 if key in repo.branchmap():
782 782 return key
783 783
784 784 repo = (remote and remote.local()) and remote or self
785 785 return repo[key].branch()
786 786
787 787 def known(self, nodes):
788 788 nm = self.changelog.nodemap
789 789 pc = self._phasecache
790 790 result = []
791 791 for n in nodes:
792 792 r = nm.get(n)
793 793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 794 result.append(resp)
795 795 return result
796 796
797 797 def local(self):
798 798 return self
799 799
800 800 def cancopy(self):
801 801 return self.local() # so statichttprepo's override of local() works
802 802
803 803 def join(self, f):
804 804 return os.path.join(self.path, f)
805 805
806 806 def wjoin(self, f):
807 807 return os.path.join(self.root, f)
808 808
809 809 def file(self, f):
810 810 if f[0] == '/':
811 811 f = f[1:]
812 812 return filelog.filelog(self.sopener, f)
813 813
814 814 def changectx(self, changeid):
815 815 return self[changeid]
816 816
817 817 def parents(self, changeid=None):
818 818 '''get list of changectxs for parents of changeid'''
819 819 return self[changeid].parents()
820 820
821 821 def setparents(self, p1, p2=nullid):
822 822 copies = self.dirstate.setparents(p1, p2)
823 823 if copies:
824 824 # Adjust copy records, the dirstate cannot do it, it
825 825 # requires access to parents manifests. Preserve them
826 826 # only for entries added to first parent.
827 827 pctx = self[p1]
828 828 for f in copies:
829 829 if f not in pctx and copies[f] in pctx:
830 830 self.dirstate.copy(copies[f], f)
831 831
832 832 def filectx(self, path, changeid=None, fileid=None):
833 833 """changeid can be a changeset revision, node, or tag.
834 834 fileid can be a file revision or node."""
835 835 return context.filectx(self, path, changeid, fileid)
836 836
837 837 def getcwd(self):
838 838 return self.dirstate.getcwd()
839 839
840 840 def pathto(self, f, cwd=None):
841 841 return self.dirstate.pathto(f, cwd)
842 842
843 843 def wfile(self, f, mode='r'):
844 844 return self.wopener(f, mode)
845 845
846 846 def _link(self, f):
847 847 return os.path.islink(self.wjoin(f))
848 848
849 849 def _loadfilter(self, filter):
850 850 if filter not in self.filterpats:
851 851 l = []
852 852 for pat, cmd in self.ui.configitems(filter):
853 853 if cmd == '!':
854 854 continue
855 855 mf = matchmod.match(self.root, '', [pat])
856 856 fn = None
857 857 params = cmd
858 858 for name, filterfn in self._datafilters.iteritems():
859 859 if cmd.startswith(name):
860 860 fn = filterfn
861 861 params = cmd[len(name):].lstrip()
862 862 break
863 863 if not fn:
864 864 fn = lambda s, c, **kwargs: util.filter(s, c)
865 865 # Wrap old filters not supporting keyword arguments
866 866 if not inspect.getargspec(fn)[2]:
867 867 oldfn = fn
868 868 fn = lambda s, c, **kwargs: oldfn(s, c)
869 869 l.append((mf, fn, params))
870 870 self.filterpats[filter] = l
871 871 return self.filterpats[filter]
872 872
873 873 def _filter(self, filterpats, filename, data):
874 874 for mf, fn, cmd in filterpats:
875 875 if mf(filename):
876 876 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
877 877 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
878 878 break
879 879
880 880 return data
881 881
882 882 @propertycache
883 883 def _encodefilterpats(self):
884 884 return self._loadfilter('encode')
885 885
886 886 @propertycache
887 887 def _decodefilterpats(self):
888 888 return self._loadfilter('decode')
889 889
890 890 def adddatafilter(self, name, filter):
891 891 self._datafilters[name] = filter
892 892
893 893 def wread(self, filename):
894 894 if self._link(filename):
895 895 data = os.readlink(self.wjoin(filename))
896 896 else:
897 897 data = self.wopener.read(filename)
898 898 return self._filter(self._encodefilterpats, filename, data)
899 899
900 900 def wwrite(self, filename, data, flags):
901 901 data = self._filter(self._decodefilterpats, filename, data)
902 902 if 'l' in flags:
903 903 self.wopener.symlink(data, filename)
904 904 else:
905 905 self.wopener.write(filename, data)
906 906 if 'x' in flags:
907 907 util.setflags(self.wjoin(filename), False, True)
908 908
909 909 def wwritedata(self, filename, data):
910 910 return self._filter(self._decodefilterpats, filename, data)
911 911
912 912 def transaction(self, desc):
913 913 tr = self._transref and self._transref() or None
914 914 if tr and tr.running():
915 915 return tr.nest()
916 916
917 917 # abort here if the journal already exists
918 918 if os.path.exists(self.sjoin("journal")):
919 919 raise error.RepoError(
920 920 _("abandoned transaction found - run hg recover"))
921 921
922 922 self._writejournal(desc)
923 923 renames = [(x, undoname(x)) for x in self._journalfiles()]
924 924
925 925 tr = transaction.transaction(self.ui.warn, self.sopener,
926 926 self.sjoin("journal"),
927 927 aftertrans(renames),
928 928 self.store.createmode)
929 929 self._transref = weakref.ref(tr)
930 930 return tr
931 931
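# --- Editorial sketch, not part of the original changeset ---
# The usual pattern around transaction(): take the store lock first, close()
# the transaction on success and release() it in all cases, mirroring what
# commitctx() does further down. 'somerepo' is a hypothetical repository.
def _exampletransactionuse(somerepo):
    lock = somerepo.lock()
    try:
        tr = somerepo.transaction('example')
        try:
            # ... write to the store through the transaction here ...
            tr.close()
        finally:
            tr.release()
    finally:
        lock.release()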
932 932 def _journalfiles(self):
933 933 return (self.sjoin('journal'), self.join('journal.dirstate'),
934 934 self.join('journal.branch'), self.join('journal.desc'),
935 935 self.join('journal.bookmarks'),
936 936 self.sjoin('journal.phaseroots'))
937 937
938 938 def undofiles(self):
939 939 return [undoname(x) for x in self._journalfiles()]
940 940
941 941 def _writejournal(self, desc):
942 942 self.opener.write("journal.dirstate",
943 943 self.opener.tryread("dirstate"))
944 944 self.opener.write("journal.branch",
945 945 encoding.fromlocal(self.dirstate.branch()))
946 946 self.opener.write("journal.desc",
947 947 "%d\n%s\n" % (len(self), desc))
948 948 self.opener.write("journal.bookmarks",
949 949 self.opener.tryread("bookmarks"))
950 950 self.sopener.write("journal.phaseroots",
951 951 self.sopener.tryread("phaseroots"))
952 952
953 953 def recover(self):
954 954 lock = self.lock()
955 955 try:
956 956 if os.path.exists(self.sjoin("journal")):
957 957 self.ui.status(_("rolling back interrupted transaction\n"))
958 958 transaction.rollback(self.sopener, self.sjoin("journal"),
959 959 self.ui.warn)
960 960 self.invalidate()
961 961 return True
962 962 else:
963 963 self.ui.warn(_("no interrupted transaction available\n"))
964 964 return False
965 965 finally:
966 966 lock.release()
967 967
968 968 def rollback(self, dryrun=False, force=False):
969 969 wlock = lock = None
970 970 try:
971 971 wlock = self.wlock()
972 972 lock = self.lock()
973 973 if os.path.exists(self.sjoin("undo")):
974 974 return self._rollback(dryrun, force)
975 975 else:
976 976 self.ui.warn(_("no rollback information available\n"))
977 977 return 1
978 978 finally:
979 979 release(lock, wlock)
980 980
981 981 @unfilteredmeth # Until we get smarter cache management
982 982 def _rollback(self, dryrun, force):
983 983 ui = self.ui
984 984 try:
985 985 args = self.opener.read('undo.desc').splitlines()
986 986 (oldlen, desc, detail) = (int(args[0]), args[1], None)
987 987 if len(args) >= 3:
988 988 detail = args[2]
989 989 oldtip = oldlen - 1
990 990
991 991 if detail and ui.verbose:
992 992 msg = (_('repository tip rolled back to revision %s'
993 993 ' (undo %s: %s)\n')
994 994 % (oldtip, desc, detail))
995 995 else:
996 996 msg = (_('repository tip rolled back to revision %s'
997 997 ' (undo %s)\n')
998 998 % (oldtip, desc))
999 999 except IOError:
1000 1000 msg = _('rolling back unknown transaction\n')
1001 1001 desc = None
1002 1002
1003 1003 if not force and self['.'] != self['tip'] and desc == 'commit':
1004 1004 raise util.Abort(
1005 1005 _('rollback of last commit while not checked out '
1006 1006 'may lose data'), hint=_('use -f to force'))
1007 1007
1008 1008 ui.status(msg)
1009 1009 if dryrun:
1010 1010 return 0
1011 1011
1012 1012 parents = self.dirstate.parents()
1013 1013 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1014 1014 if os.path.exists(self.join('undo.bookmarks')):
1015 1015 util.rename(self.join('undo.bookmarks'),
1016 1016 self.join('bookmarks'))
1017 1017 if os.path.exists(self.sjoin('undo.phaseroots')):
1018 1018 util.rename(self.sjoin('undo.phaseroots'),
1019 1019 self.sjoin('phaseroots'))
1020 1020 self.invalidate()
1021 1021
1022 1022 # Discard all cache entries to force reloading everything.
1023 1023 self._filecache.clear()
1024 1024
1025 1025 parentgone = (parents[0] not in self.changelog.nodemap or
1026 1026 parents[1] not in self.changelog.nodemap)
1027 1027 if parentgone:
1028 1028 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1029 1029 try:
1030 1030 branch = self.opener.read('undo.branch')
1031 1031 self.dirstate.setbranch(encoding.tolocal(branch))
1032 1032 except IOError:
1033 1033 ui.warn(_('named branch could not be reset: '
1034 1034 'current branch is still \'%s\'\n')
1035 1035 % self.dirstate.branch())
1036 1036
1037 1037 self.dirstate.invalidate()
1038 1038 parents = tuple([p.rev() for p in self.parents()])
1039 1039 if len(parents) > 1:
1040 1040 ui.status(_('working directory now based on '
1041 1041 'revisions %d and %d\n') % parents)
1042 1042 else:
1043 1043 ui.status(_('working directory now based on '
1044 1044 'revision %d\n') % parents)
1045 1045 # TODO: if we know which new heads may result from this rollback, pass
1046 1046 # them to destroy(), which will prevent the branchhead cache from being
1047 1047 # invalidated.
1048 1048 self.destroyed()
1049 1049 return 0
1050 1050
1051 1051 def invalidatecaches(self):
1052 1052 def delcache(name):
1053 1053 try:
1054 1054 delattr(self, name)
1055 1055 except AttributeError:
1056 1056 pass
1057 1057
1058 1058 delcache('_tagscache')
1059 1059
1060 1060 self.unfiltered()._branchcache = None # in UTF-8
1061 1061 self.unfiltered()._branchcachetip = None
1062 1062 obsolete.clearobscaches(self)
1063 1063
1064 1064 def invalidatedirstate(self):
1065 1065 '''Invalidates the dirstate, causing the next call to dirstate
1066 1066 to check if it was modified since the last time it was read,
1067 1067 rereading it if it has.
1068 1068
1069 1069 This differs from dirstate.invalidate() in that it doesn't always
1070 1070 reread the dirstate. Use dirstate.invalidate() if you want to
1071 1071 explicitly read the dirstate again (i.e. restoring it to a previous
1072 1072 known good state).'''
1073 1073 if 'dirstate' in self.__dict__:
1074 1074 for k in self.dirstate._filecache:
1075 1075 try:
1076 1076 delattr(self.dirstate, k)
1077 1077 except AttributeError:
1078 1078 pass
1079 1079 delattr(self.unfiltered(), 'dirstate')
1080 1080
1081 1081 def invalidate(self):
1082 1082 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1083 1083 for k in self._filecache:
1084 1084 # dirstate is invalidated separately in invalidatedirstate()
1085 1085 if k == 'dirstate':
1086 1086 continue
1087 1087
1088 1088 try:
1089 1089 delattr(unfiltered, k)
1090 1090 except AttributeError:
1091 1091 pass
1092 1092 self.invalidatecaches()
1093 1093
1094 1094 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1095 1095 try:
1096 1096 l = lock.lock(lockname, 0, releasefn, desc=desc)
1097 1097 except error.LockHeld, inst:
1098 1098 if not wait:
1099 1099 raise
1100 1100 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1101 1101 (desc, inst.locker))
1102 1102 # default to 600 seconds timeout
1103 1103 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1104 1104 releasefn, desc=desc)
1105 1105 if acquirefn:
1106 1106 acquirefn()
1107 1107 return l
1108 1108
1109 1109 def _afterlock(self, callback):
1110 1110 """add a callback to the current repository lock.
1111 1111
1112 1112 The callback will be executed on lock release."""
1113 1113 l = self._lockref and self._lockref()
1114 1114 if l:
1115 1115 l.postrelease.append(callback)
1116 1116 else:
1117 1117 callback()
1118 1118
1119 1119 def lock(self, wait=True):
1120 1120 '''Lock the repository store (.hg/store) and return a weak reference
1121 1121 to the lock. Use this before modifying the store (e.g. committing or
1122 1122 stripping). If you are opening a transaction, get a lock as well.'''
1123 1123 l = self._lockref and self._lockref()
1124 1124 if l is not None and l.held:
1125 1125 l.lock()
1126 1126 return l
1127 1127
1128 1128 def unlock():
1129 1129 self.store.write()
1130 1130 if '_phasecache' in vars(self):
1131 1131 self._phasecache.write()
1132 1132 for k, ce in self._filecache.items():
1133 1133 if k == 'dirstate':
1134 1134 continue
1135 1135 ce.refresh()
1136 1136
1137 1137 l = self._lock(self.sjoin("lock"), wait, unlock,
1138 1138 self.invalidate, _('repository %s') % self.origroot)
1139 1139 self._lockref = weakref.ref(l)
1140 1140 return l
1141 1141
1142 1142 def wlock(self, wait=True):
1143 1143 '''Lock the non-store parts of the repository (everything under
1144 1144 .hg except .hg/store) and return a weak reference to the lock.
1145 1145 Use this before modifying files in .hg.'''
1146 1146 l = self._wlockref and self._wlockref()
1147 1147 if l is not None and l.held:
1148 1148 l.lock()
1149 1149 return l
1150 1150
1151 1151 def unlock():
1152 1152 self.dirstate.write()
1153 1153 ce = self._filecache.get('dirstate')
1154 1154 if ce:
1155 1155 ce.refresh()
1156 1156
1157 1157 l = self._lock(self.join("wlock"), wait, unlock,
1158 1158 self.invalidatedirstate, _('working directory of %s') %
1159 1159 self.origroot)
1160 1160 self._wlockref = weakref.ref(l)
1161 1161 return l
1162 1162
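# --- Editorial sketch, not part of the original changeset ---
# Lock ordering as used elsewhere in this file (e.g. rollback() above):
# take the wlock before the store lock and release both in reverse order
# via lock.release(). 'somerepo' is a hypothetical repository instance.
def _examplelockinguse(somerepo):
    wlock = somerepo.wlock()
    lock = somerepo.lock()
    try:
        pass  # ... modify the working directory and the store ...
    finally:
        release(lock, wlock)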
1163 1163 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1164 1164 """
1165 1165 commit an individual file as part of a larger transaction
1166 1166 """
1167 1167
1168 1168 fname = fctx.path()
1169 1169 text = fctx.data()
1170 1170 flog = self.file(fname)
1171 1171 fparent1 = manifest1.get(fname, nullid)
1172 1172 fparent2 = fparent2o = manifest2.get(fname, nullid)
1173 1173
1174 1174 meta = {}
1175 1175 copy = fctx.renamed()
1176 1176 if copy and copy[0] != fname:
1177 1177 # Mark the new revision of this file as a copy of another
1178 1178 # file. This copy data will effectively act as a parent
1179 1179 # of this new revision. If this is a merge, the first
1180 1180 # parent will be the nullid (meaning "look up the copy data")
1181 1181 # and the second one will be the other parent. For example:
1182 1182 #
1183 1183 # 0 --- 1 --- 3 rev1 changes file foo
1184 1184 # \ / rev2 renames foo to bar and changes it
1185 1185 # \- 2 -/ rev3 should have bar with all changes and
1186 1186 # should record that bar descends from
1187 1187 # bar in rev2 and foo in rev1
1188 1188 #
1189 1189 # this allows this merge to succeed:
1190 1190 #
1191 1191 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1192 1192 # \ / merging rev3 and rev4 should use bar@rev2
1193 1193 # \- 2 --- 4 as the merge base
1194 1194 #
1195 1195
1196 1196 cfname = copy[0]
1197 1197 crev = manifest1.get(cfname)
1198 1198 newfparent = fparent2
1199 1199
1200 1200 if manifest2: # branch merge
1201 1201 if fparent2 == nullid or crev is None: # copied on remote side
1202 1202 if cfname in manifest2:
1203 1203 crev = manifest2[cfname]
1204 1204 newfparent = fparent1
1205 1205
1206 1206 # find source in nearest ancestor if we've lost track
1207 1207 if not crev:
1208 1208 self.ui.debug(" %s: searching for copy revision for %s\n" %
1209 1209 (fname, cfname))
1210 1210 for ancestor in self[None].ancestors():
1211 1211 if cfname in ancestor:
1212 1212 crev = ancestor[cfname].filenode()
1213 1213 break
1214 1214
1215 1215 if crev:
1216 1216 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1217 1217 meta["copy"] = cfname
1218 1218 meta["copyrev"] = hex(crev)
1219 1219 fparent1, fparent2 = nullid, newfparent
1220 1220 else:
1221 1221 self.ui.warn(_("warning: can't find ancestor for '%s' "
1222 1222 "copied from '%s'!\n") % (fname, cfname))
1223 1223
1224 1224 elif fparent2 != nullid:
1225 1225 # is one parent an ancestor of the other?
1226 1226 fparentancestor = flog.ancestor(fparent1, fparent2)
1227 1227 if fparentancestor == fparent1:
1228 1228 fparent1, fparent2 = fparent2, nullid
1229 1229 elif fparentancestor == fparent2:
1230 1230 fparent2 = nullid
1231 1231
1232 1232 # is the file changed?
1233 1233 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1234 1234 changelist.append(fname)
1235 1235 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1236 1236
1237 1237 # are just the flags changed during merge?
1238 1238 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1239 1239 changelist.append(fname)
1240 1240
1241 1241 return fparent1
1242 1242
1243 @unfilteredmeth
1243 1244 def commit(self, text="", user=None, date=None, match=None, force=False,
1244 1245 editor=False, extra={}):
1245 1246 """Add a new revision to current repository.
1246 1247
1247 1248 Revision information is gathered from the working directory,
1248 1249 match can be used to filter the committed files. If editor is
1249 1250 supplied, it is called to get a commit message.
1250 1251 """
1251 1252
1252 1253 def fail(f, msg):
1253 1254 raise util.Abort('%s: %s' % (f, msg))
1254 1255
1255 1256 if not match:
1256 1257 match = matchmod.always(self.root, '')
1257 1258
1258 1259 if not force:
1259 1260 vdirs = []
1260 1261 match.dir = vdirs.append
1261 1262 match.bad = fail
1262 1263
1263 1264 wlock = self.wlock()
1264 1265 try:
1265 1266 wctx = self[None]
1266 1267 merge = len(wctx.parents()) > 1
1267 1268
1268 1269 if (not force and merge and match and
1269 1270 (match.files() or match.anypats())):
1270 1271 raise util.Abort(_('cannot partially commit a merge '
1271 1272 '(do not specify files or patterns)'))
1272 1273
1273 1274 changes = self.status(match=match, clean=force)
1274 1275 if force:
1275 1276 changes[0].extend(changes[6]) # mq may commit unchanged files
1276 1277
1277 1278 # check subrepos
1278 1279 subs = []
1279 1280 commitsubs = set()
1280 1281 newstate = wctx.substate.copy()
1281 1282 # only manage subrepos and .hgsubstate if .hgsub is present
1282 1283 if '.hgsub' in wctx:
1283 1284 # we'll decide whether to track this ourselves, thanks
1284 1285 if '.hgsubstate' in changes[0]:
1285 1286 changes[0].remove('.hgsubstate')
1286 1287 if '.hgsubstate' in changes[2]:
1287 1288 changes[2].remove('.hgsubstate')
1288 1289
1289 1290 # compare current state to last committed state
1290 1291 # build new substate based on last committed state
1291 1292 oldstate = wctx.p1().substate
1292 1293 for s in sorted(newstate.keys()):
1293 1294 if not match(s):
1294 1295 # ignore working copy, use old state if present
1295 1296 if s in oldstate:
1296 1297 newstate[s] = oldstate[s]
1297 1298 continue
1298 1299 if not force:
1299 1300 raise util.Abort(
1300 1301 _("commit with new subrepo %s excluded") % s)
1301 1302 if wctx.sub(s).dirty(True):
1302 1303 if not self.ui.configbool('ui', 'commitsubrepos'):
1303 1304 raise util.Abort(
1304 1305 _("uncommitted changes in subrepo %s") % s,
1305 1306 hint=_("use --subrepos for recursive commit"))
1306 1307 subs.append(s)
1307 1308 commitsubs.add(s)
1308 1309 else:
1309 1310 bs = wctx.sub(s).basestate()
1310 1311 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1311 1312 if oldstate.get(s, (None, None, None))[1] != bs:
1312 1313 subs.append(s)
1313 1314
1314 1315 # check for removed subrepos
1315 1316 for p in wctx.parents():
1316 1317 r = [s for s in p.substate if s not in newstate]
1317 1318 subs += [s for s in r if match(s)]
1318 1319 if subs:
1319 1320 if (not match('.hgsub') and
1320 1321 '.hgsub' in (wctx.modified() + wctx.added())):
1321 1322 raise util.Abort(
1322 1323 _("can't commit subrepos without .hgsub"))
1323 1324 changes[0].insert(0, '.hgsubstate')
1324 1325
1325 1326 elif '.hgsub' in changes[2]:
1326 1327 # clean up .hgsubstate when .hgsub is removed
1327 1328 if ('.hgsubstate' in wctx and
1328 1329 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1329 1330 changes[2].insert(0, '.hgsubstate')
1330 1331
1331 1332 # make sure all explicit patterns are matched
1332 1333 if not force and match.files():
1333 1334 matched = set(changes[0] + changes[1] + changes[2])
1334 1335
1335 1336 for f in match.files():
1336 1337 f = self.dirstate.normalize(f)
1337 1338 if f == '.' or f in matched or f in wctx.substate:
1338 1339 continue
1339 1340 if f in changes[3]: # missing
1340 1341 fail(f, _('file not found!'))
1341 1342 if f in vdirs: # visited directory
1342 1343 d = f + '/'
1343 1344 for mf in matched:
1344 1345 if mf.startswith(d):
1345 1346 break
1346 1347 else:
1347 1348 fail(f, _("no match under directory!"))
1348 1349 elif f not in self.dirstate:
1349 1350 fail(f, _("file not tracked!"))
1350 1351
1351 1352 if (not force and not extra.get("close") and not merge
1352 1353 and not (changes[0] or changes[1] or changes[2])
1353 1354 and wctx.branch() == wctx.p1().branch()):
1354 1355 return None
1355 1356
1356 1357 if merge and changes[3]:
1357 1358 raise util.Abort(_("cannot commit merge with missing files"))
1358 1359
1359 1360 ms = mergemod.mergestate(self)
1360 1361 for f in changes[0]:
1361 1362 if f in ms and ms[f] == 'u':
1362 1363 raise util.Abort(_("unresolved merge conflicts "
1363 1364 "(see hg help resolve)"))
1364 1365
1365 1366 cctx = context.workingctx(self, text, user, date, extra, changes)
1366 1367 if editor:
1367 1368 cctx._text = editor(self, cctx, subs)
1368 1369 edited = (text != cctx._text)
1369 1370
1370 1371 # commit subs and write new state
1371 1372 if subs:
1372 1373 for s in sorted(commitsubs):
1373 1374 sub = wctx.sub(s)
1374 1375 self.ui.status(_('committing subrepository %s\n') %
1375 1376 subrepo.subrelpath(sub))
1376 1377 sr = sub.commit(cctx._text, user, date)
1377 1378 newstate[s] = (newstate[s][0], sr)
1378 1379 subrepo.writestate(self, newstate)
1379 1380
1380 1381 # Save commit message in case this transaction gets rolled back
1381 1382 # (e.g. by a pretxncommit hook). Leave the content alone on
1382 1383 # the assumption that the user will use the same editor again.
1383 1384 msgfn = self.savecommitmessage(cctx._text)
1384 1385
1385 1386 p1, p2 = self.dirstate.parents()
1386 1387 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1387 1388 try:
1388 1389 self.hook("precommit", throw=True, parent1=hookp1,
1389 1390 parent2=hookp2)
1390 1391 ret = self.commitctx(cctx, True)
1391 1392 except: # re-raises
1392 1393 if edited:
1393 1394 self.ui.write(
1394 1395 _('note: commit message saved in %s\n') % msgfn)
1395 1396 raise
1396 1397
1397 1398 # update bookmarks, dirstate and mergestate
1398 1399 bookmarks.update(self, [p1, p2], ret)
1399 1400 for f in changes[0] + changes[1]:
1400 1401 self.dirstate.normal(f)
1401 1402 for f in changes[2]:
1402 1403 self.dirstate.drop(f)
1403 1404 self.dirstate.setparents(ret)
1404 1405 ms.reset()
1405 1406 finally:
1406 1407 wlock.release()
1407 1408
1408 1409 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1409 1410 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1410 1411 self._afterlock(commithook)
1411 1412 return ret
1412 1413
1414 @unfilteredmeth
1413 1415 def commitctx(self, ctx, error=False):
1414 1416 """Add a new revision to current repository.
1415 1417 Revision information is passed via the context argument.
1416 1418 """
1417 1419
1418 1420 tr = lock = None
1419 1421 removed = list(ctx.removed())
1420 1422 p1, p2 = ctx.p1(), ctx.p2()
1421 1423 user = ctx.user()
1422 1424
1423 1425 lock = self.lock()
1424 1426 try:
1425 1427 tr = self.transaction("commit")
1426 1428 trp = weakref.proxy(tr)
1427 1429
1428 1430 if ctx.files():
1429 1431 m1 = p1.manifest().copy()
1430 1432 m2 = p2.manifest()
1431 1433
1432 1434 # check in files
1433 1435 new = {}
1434 1436 changed = []
1435 1437 linkrev = len(self)
1436 1438 for f in sorted(ctx.modified() + ctx.added()):
1437 1439 self.ui.note(f + "\n")
1438 1440 try:
1439 1441 fctx = ctx[f]
1440 1442 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1441 1443 changed)
1442 1444 m1.set(f, fctx.flags())
1443 1445 except OSError, inst:
1444 1446 self.ui.warn(_("trouble committing %s!\n") % f)
1445 1447 raise
1446 1448 except IOError, inst:
1447 1449 errcode = getattr(inst, 'errno', errno.ENOENT)
1448 1450 if error or errcode and errcode != errno.ENOENT:
1449 1451 self.ui.warn(_("trouble committing %s!\n") % f)
1450 1452 raise
1451 1453 else:
1452 1454 removed.append(f)
1453 1455
1454 1456 # update manifest
1455 1457 m1.update(new)
1456 1458 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1457 1459 drop = [f for f in removed if f in m1]
1458 1460 for f in drop:
1459 1461 del m1[f]
1460 1462 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1461 1463 p2.manifestnode(), (new, drop))
1462 1464 files = changed + removed
1463 1465 else:
1464 1466 mn = p1.manifestnode()
1465 1467 files = []
1466 1468
1467 1469 # update changelog
1468 1470 self.changelog.delayupdate()
1469 1471 n = self.changelog.add(mn, files, ctx.description(),
1470 1472 trp, p1.node(), p2.node(),
1471 1473 user, ctx.date(), ctx.extra().copy())
1472 1474 p = lambda: self.changelog.writepending() and self.root or ""
1473 1475 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1474 1476 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1475 1477 parent2=xp2, pending=p)
1476 1478 self.changelog.finalize(trp)
1477 1479 # set the new commit in its proper phase
1478 1480 targetphase = phases.newcommitphase(self.ui)
1479 1481 if targetphase:
1480 1482 # retracting the boundary does not alter parent changesets.
1481 1483 # if a parent has a higher phase, the resulting phase will
1482 1484 # be compliant anyway
1483 1485 #
1484 1486 # if minimal phase was 0 we don't need to retract anything
1485 1487 phases.retractboundary(self, targetphase, [n])
1486 1488 tr.close()
1487 1489 self.updatebranchcache()
1488 1490 return n
1489 1491 finally:
1490 1492 if tr:
1491 1493 tr.release()
1492 1494 lock.release()
1493 1495
1494 1496 @unfilteredmeth
1495 1497 def destroyed(self, newheadnodes=None):
1496 1498 '''Inform the repository that nodes have been destroyed.
1497 1499 Intended for use by strip and rollback, so there's a common
1498 1500 place for anything that has to be done after destroying history.
1499 1501
1500 1502 If you know the branchhead cache was up to date before nodes were removed
1501 1503 and you also know the set of candidate new heads that may have resulted
1502 1504 from the destruction, you can set newheadnodes. This will enable the
1503 1505 code to update the branchheads cache, rather than having future code
1504 1506 decide it's invalid and regenerating it from scratch.
1505 1507 '''
1506 1508 # If we have info, newheadnodes, on how to update the branch cache, do
1507 1509 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1508 1510 # will be caught the next time it is read.
1509 1511 if newheadnodes:
1510 1512 tiprev = len(self) - 1
1511 1513 ctxgen = (self[node] for node in newheadnodes
1512 1514 if self.changelog.hasnode(node))
1513 1515 self._updatebranchcache(self._branchcache, ctxgen)
1514 1516 self._writebranchcache(self._branchcache, self.changelog.tip(),
1515 1517 tiprev)
1516 1518
1517 1519 # Ensure the persistent tag cache is updated. Doing it now
1518 1520 # means that the tag cache only has to worry about destroyed
1519 1521 # heads immediately after a strip/rollback. That in turn
1520 1522 # guarantees that "cachetip == currenttip" (comparing both rev
1521 1523 # and node) always means no nodes have been added or destroyed.
1522 1524
1523 1525 # XXX this is suboptimal when qrefresh'ing: we strip the current
1524 1526 # head, refresh the tag cache, then immediately add a new head.
1525 1527 # But I think doing it this way is necessary for the "instant
1526 1528 # tag cache retrieval" case to work.
1527 1529 self.invalidatecaches()
1528 1530
1529 1531 # Discard all cache entries to force reloading everything.
1530 1532 self._filecache.clear()
1531 1533
1532 1534 def walk(self, match, node=None):
1533 1535 '''
1534 1536 walk recursively through the directory tree or a given
1535 1537 changeset, finding all files matched by the match
1536 1538 function
1537 1539 '''
1538 1540 return self[node].walk(match)
1539 1541
1540 1542 def status(self, node1='.', node2=None, match=None,
1541 1543 ignored=False, clean=False, unknown=False,
1542 1544 listsubrepos=False):
1543 1545 """return status of files between two nodes or node and working
1544 1546 directory.
1545 1547
1546 1548 If node1 is None, use the first dirstate parent instead.
1547 1549 If node2 is None, compare node1 with working directory.
1548 1550 """
1549 1551
1550 1552 def mfmatches(ctx):
1551 1553 mf = ctx.manifest().copy()
1552 1554 if match.always():
1553 1555 return mf
1554 1556 for fn in mf.keys():
1555 1557 if not match(fn):
1556 1558 del mf[fn]
1557 1559 return mf
1558 1560
1559 1561 if isinstance(node1, context.changectx):
1560 1562 ctx1 = node1
1561 1563 else:
1562 1564 ctx1 = self[node1]
1563 1565 if isinstance(node2, context.changectx):
1564 1566 ctx2 = node2
1565 1567 else:
1566 1568 ctx2 = self[node2]
1567 1569
1568 1570 working = ctx2.rev() is None
1569 1571 parentworking = working and ctx1 == self['.']
1570 1572 match = match or matchmod.always(self.root, self.getcwd())
1571 1573 listignored, listclean, listunknown = ignored, clean, unknown
1572 1574
1573 1575 # load earliest manifest first for caching reasons
1574 1576 if not working and ctx2.rev() < ctx1.rev():
1575 1577 ctx2.manifest()
1576 1578
1577 1579 if not parentworking:
1578 1580 def bad(f, msg):
1579 1581 # 'f' may be a directory pattern from 'match.files()',
1580 1582 # so 'f not in ctx1' is not enough
1581 1583 if f not in ctx1 and f not in ctx1.dirs():
1582 1584 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1583 1585 match.bad = bad
1584 1586
1585 1587 if working: # we need to scan the working dir
1586 1588 subrepos = []
1587 1589 if '.hgsub' in self.dirstate:
1588 1590 subrepos = ctx2.substate.keys()
1589 1591 s = self.dirstate.status(match, subrepos, listignored,
1590 1592 listclean, listunknown)
1591 1593 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1592 1594
1593 1595 # check for any possibly clean files
1594 1596 if parentworking and cmp:
1595 1597 fixup = []
1596 1598 # do a full compare of any files that might have changed
1597 1599 for f in sorted(cmp):
1598 1600 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1599 1601 or ctx1[f].cmp(ctx2[f])):
1600 1602 modified.append(f)
1601 1603 else:
1602 1604 fixup.append(f)
1603 1605
1604 1606 # update dirstate for files that are actually clean
1605 1607 if fixup:
1606 1608 if listclean:
1607 1609 clean += fixup
1608 1610
1609 1611 try:
1610 1612 # updating the dirstate is optional
1611 1613 # so we don't wait on the lock
1612 1614 wlock = self.wlock(False)
1613 1615 try:
1614 1616 for f in fixup:
1615 1617 self.dirstate.normal(f)
1616 1618 finally:
1617 1619 wlock.release()
1618 1620 except error.LockError:
1619 1621 pass
1620 1622
1621 1623 if not parentworking:
1622 1624 mf1 = mfmatches(ctx1)
1623 1625 if working:
1624 1626 # we are comparing working dir against non-parent
1625 1627 # generate a pseudo-manifest for the working dir
1626 1628 mf2 = mfmatches(self['.'])
1627 1629 for f in cmp + modified + added:
1628 1630 mf2[f] = None
1629 1631 mf2.set(f, ctx2.flags(f))
1630 1632 for f in removed:
1631 1633 if f in mf2:
1632 1634 del mf2[f]
1633 1635 else:
1634 1636 # we are comparing two revisions
1635 1637 deleted, unknown, ignored = [], [], []
1636 1638 mf2 = mfmatches(ctx2)
1637 1639
1638 1640 modified, added, clean = [], [], []
1639 1641 withflags = mf1.withflags() | mf2.withflags()
1640 1642 for fn in mf2:
1641 1643 if fn in mf1:
1642 1644 if (fn not in deleted and
1643 1645 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1644 1646 (mf1[fn] != mf2[fn] and
1645 1647 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1646 1648 modified.append(fn)
1647 1649 elif listclean:
1648 1650 clean.append(fn)
1649 1651 del mf1[fn]
1650 1652 elif fn not in deleted:
1651 1653 added.append(fn)
1652 1654 removed = mf1.keys()
1653 1655
1654 1656 if working and modified and not self.dirstate._checklink:
1655 1657 # Symlink placeholders may get non-symlink-like contents
1656 1658 # via user error or dereferencing by NFS or Samba servers,
1657 1659 # so we filter out any placeholders that don't look like a
1658 1660 # symlink
1659 1661 sane = []
1660 1662 for f in modified:
1661 1663 if ctx2.flags(f) == 'l':
1662 1664 d = ctx2[f].data()
1663 1665 if len(d) >= 1024 or '\n' in d or util.binary(d):
1664 1666 self.ui.debug('ignoring suspect symlink placeholder'
1665 1667 ' "%s"\n' % f)
1666 1668 continue
1667 1669 sane.append(f)
1668 1670 modified = sane
1669 1671
1670 1672 r = modified, added, removed, deleted, unknown, ignored, clean
1671 1673
1672 1674 if listsubrepos:
1673 1675 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1674 1676 if working:
1675 1677 rev2 = None
1676 1678 else:
1677 1679 rev2 = ctx2.substate[subpath][1]
1678 1680 try:
1679 1681 submatch = matchmod.narrowmatcher(subpath, match)
1680 1682 s = sub.status(rev2, match=submatch, ignored=listignored,
1681 1683 clean=listclean, unknown=listunknown,
1682 1684 listsubrepos=True)
1683 1685 for rfiles, sfiles in zip(r, s):
1684 1686 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1685 1687 except error.LookupError:
1686 1688 self.ui.status(_("skipping missing subrepository: %s\n")
1687 1689 % subpath)
1688 1690
1689 1691 for l in r:
1690 1692 l.sort()
1691 1693 return r
1692 1694
1693 1695 def heads(self, start=None):
1694 1696 heads = self.changelog.heads(start)
1695 1697 # sort the output in rev descending order
1696 1698 return sorted(heads, key=self.changelog.rev, reverse=True)
1697 1699
1698 1700 def branchheads(self, branch=None, start=None, closed=False):
1699 1701 '''return a (possibly filtered) list of heads for the given branch
1700 1702
1701 1703 Heads are returned in topological order, from newest to oldest.
1702 1704 If branch is None, use the dirstate branch.
1703 1705 If start is not None, return only heads reachable from start.
1704 1706 If closed is True, return heads that are marked as closed as well.
1705 1707 '''
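# Usage sketch (illustrative): the open heads of the 'default' branch,
# newest first, or [] if that branch does not exist:
#     heads = repo.branchheads('default', closed=False)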
1706 1708 if branch is None:
1707 1709 branch = self[None].branch()
1708 1710 branches = self.branchmap()
1709 1711 if branch not in branches:
1710 1712 return []
1711 1713 # the cache returns heads ordered lowest to highest
1712 1714 bheads = list(reversed(branches[branch]))
1713 1715 if start is not None:
1714 1716 # filter out the heads that cannot be reached from startrev
1715 1717 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1716 1718 bheads = [h for h in bheads if h in fbheads]
1717 1719 if not closed:
1718 1720 bheads = [h for h in bheads if not self[h].closesbranch()]
1719 1721 return bheads
1720 1722
1721 1723 def branches(self, nodes):
1722 1724 if not nodes:
1723 1725 nodes = [self.changelog.tip()]
1724 1726 b = []
1725 1727 for n in nodes:
1726 1728 t = n
1727 1729 while True:
1728 1730 p = self.changelog.parents(n)
1729 1731 if p[1] != nullid or p[0] == nullid:
1730 1732 b.append((t, n, p[0], p[1]))
1731 1733 break
1732 1734 n = p[0]
1733 1735 return b
1734 1736
1735 1737 def between(self, pairs):
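# Walks first parents from each 'top' towards 'bottom', recording the
# nodes found at exponentially growing distances (1, 2, 4, 8, ...);
# this keeps the sample of each chain logarithmic in its length and is
# what the legacy discovery wire protocol command of the same name
# relies on.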
1736 1738 r = []
1737 1739
1738 1740 for top, bottom in pairs:
1739 1741 n, l, i = top, [], 0
1740 1742 f = 1
1741 1743
1742 1744 while n != bottom and n != nullid:
1743 1745 p = self.changelog.parents(n)[0]
1744 1746 if i == f:
1745 1747 l.append(n)
1746 1748 f = f * 2
1747 1749 n = p
1748 1750 i += 1
1749 1751
1750 1752 r.append(l)
1751 1753
1752 1754 return r
1753 1755
1754 1756 def pull(self, remote, heads=None, force=False):
1755 1757 # don't open a transaction for nothing or you will break a future
1756 1758 # useful rollback call
1757 1759 tr = None
1758 1760 trname = 'pull\n' + util.hidepassword(remote.url())
1759 1761 lock = self.lock()
1760 1762 try:
1761 1763 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1762 1764 force=force)
1763 1765 common, fetch, rheads = tmp
1764 1766 if not fetch:
1765 1767 self.ui.status(_("no changes found\n"))
1766 1768 added = []
1767 1769 result = 0
1768 1770 else:
1769 1771 tr = self.transaction(trname)
1770 1772 if heads is None and list(common) == [nullid]:
1771 1773 self.ui.status(_("requesting all changes\n"))
1772 1774 elif heads is None and remote.capable('changegroupsubset'):
1773 1775 # issue1320, avoid a race if remote changed after discovery
1774 1776 heads = rheads
1775 1777
1776 1778 if remote.capable('getbundle'):
1777 1779 cg = remote.getbundle('pull', common=common,
1778 1780 heads=heads or rheads)
1779 1781 elif heads is None:
1780 1782 cg = remote.changegroup(fetch, 'pull')
1781 1783 elif not remote.capable('changegroupsubset'):
1782 1784 raise util.Abort(_("partial pull cannot be done because "
1783 1785 "other repository doesn't support "
1784 1786 "changegroupsubset."))
1785 1787 else:
1786 1788 cg = remote.changegroupsubset(fetch, heads, 'pull')
1787 1789 clstart = len(self.changelog)
1788 1790 result = self.addchangegroup(cg, 'pull', remote.url())
1789 1791 clend = len(self.changelog)
1790 1792 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1791 1793
1792 1794 # compute target subset
1793 1795 if heads is None:
1794 1796 # We pulled everything possible
1795 1797 # sync on everything common
1796 1798 subset = common + added
1797 1799 else:
1798 1800 # We pulled a specific subset
1799 1801 # sync on this subset
1800 1802 subset = heads
1801 1803
1802 1804 # Get remote phases data from remote
1803 1805 remotephases = remote.listkeys('phases')
1804 1806 publishing = bool(remotephases.get('publishing', False))
1805 1807 if remotephases and not publishing:
1806 1808 # remote is new and non-publishing
1807 1809 pheads, _dr = phases.analyzeremotephases(self, subset,
1808 1810 remotephases)
1809 1811 phases.advanceboundary(self, phases.public, pheads)
1810 1812 phases.advanceboundary(self, phases.draft, subset)
1811 1813 else:
1812 1814 # Remote is old or publishing; all common changesets
1813 1815 # should be seen as public
1814 1816 phases.advanceboundary(self, phases.public, subset)
1815 1817
1816 1818 if obsolete._enabled:
1817 1819 self.ui.debug('fetching remote obsolete markers\n')
1818 1820 remoteobs = remote.listkeys('obsolete')
1819 1821 if 'dump0' in remoteobs:
1820 1822 if tr is None:
1821 1823 tr = self.transaction(trname)
1822 1824 for key in sorted(remoteobs, reverse=True):
1823 1825 if key.startswith('dump'):
1824 1826 data = base85.b85decode(remoteobs[key])
1825 1827 self.obsstore.mergemarkers(tr, data)
1826 1828 if tr is not None:
1827 1829 tr.close()
1828 1830 finally:
1829 1831 if tr is not None:
1830 1832 tr.release()
1831 1833 lock.release()
1832 1834
1833 1835 return result
1834 1836
1835 1837 def checkpush(self, force, revs):
1836 1838 """Extensions can override this function if additional checks have
1837 1839 to be performed before pushing, or call it if they override push
1838 1840 command.
1839 1841 """
1840 1842 pass
1841 1843
1842 1844 def push(self, remote, force=False, revs=None, newbranch=False):
1843 1845 '''Push outgoing changesets (limited by revs) from the current
1844 1846 repository to remote. Return an integer:
1845 1847 - None means nothing to push
1846 1848 - 0 means HTTP error
1847 1849 - 1 means we pushed and remote head count is unchanged *or*
1848 1850 we have outgoing changesets but refused to push
1849 1851 - other values as described by addchangegroup()
1850 1852 '''
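# Sketch of interpreting the result (illustrative names; see
# addchangegroup() further down for how the other values are encoded):
#     ret = repo.push(remote)
#     if ret is None: ...   # nothing to push
#     elif ret == 0: ...    # HTTP error from the remote
#     elif ret == 1: ...    # head count unchanged, or push refused
#     else: ...             # remote head-count delta, per addchangegroup()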
1851 1853 # there are two ways to push to remote repo:
1852 1854 #
1853 1855 # addchangegroup assumes local user can lock remote
1854 1856 # repo (local filesystem, old ssh servers).
1855 1857 #
1856 1858 # unbundle assumes local user cannot lock remote repo (new ssh
1857 1859 # servers, http servers).
1858 1860
1859 1861 if not remote.canpush():
1860 1862 raise util.Abort(_("destination does not support push"))
1861 1863 # get local lock as we might write phase data
1862 1864 locallock = self.lock()
1863 1865 try:
1864 1866 self.checkpush(force, revs)
1865 1867 lock = None
1866 1868 unbundle = remote.capable('unbundle')
1867 1869 if not unbundle:
1868 1870 lock = remote.lock()
1869 1871 try:
1870 1872 # discovery
1871 1873 fci = discovery.findcommonincoming
1872 1874 commoninc = fci(self, remote, force=force)
1873 1875 common, inc, remoteheads = commoninc
1874 1876 fco = discovery.findcommonoutgoing
1875 1877 outgoing = fco(self, remote, onlyheads=revs,
1876 1878 commoninc=commoninc, force=force)
1877 1879
1878 1880
1879 1881 if not outgoing.missing:
1880 1882 # nothing to push
1881 1883 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1882 1884 ret = None
1883 1885 else:
1884 1886 # something to push
1885 1887 if not force:
1886 1888 # an empty self.obsstore means no obsolete markers exist,
1887 1889 # so we can skip this iteration entirely
1888 1890 if self.obsstore:
1889 1891 # these messages are defined here for the 80-char line limit
1890 1892 mso = _("push includes obsolete changeset: %s!")
1891 1893 msu = _("push includes unstable changeset: %s!")
1892 1894 msb = _("push includes bumped changeset: %s!")
1893 1895 # If we are about to push and there is at least one
1894 1896 # obsolete or unstable changeset in missing, at
1895 1897 # least one of the missing heads will be obsolete or
1896 1898 # unstable. So checking heads only is ok
1897 1899 for node in outgoing.missingheads:
1898 1900 ctx = self[node]
1899 1901 if ctx.obsolete():
1900 1902 raise util.Abort(mso % ctx)
1901 1903 elif ctx.unstable():
1902 1904 raise util.Abort(msu % ctx)
1903 1905 elif ctx.bumped():
1904 1906 raise util.Abort(msb % ctx)
1905 1907 discovery.checkheads(self, remote, outgoing,
1906 1908 remoteheads, newbranch,
1907 1909 bool(inc))
1908 1910
1909 1911 # create a changegroup from local
1910 1912 if revs is None and not outgoing.excluded:
1911 1913 # push everything,
1912 1914 # use the fast path, no race possible on push
1913 1915 cg = self._changegroup(outgoing.missing, 'push')
1914 1916 else:
1915 1917 cg = self.getlocalbundle('push', outgoing)
1916 1918
1917 1919 # apply changegroup to remote
1918 1920 if unbundle:
1919 1921 # local repo finds heads on server, finds out what
1920 1922 # revs it must push. once revs transferred, if server
1921 1923 # finds it has different heads (someone else won
1922 1924 # commit/push race), server aborts.
1923 1925 if force:
1924 1926 remoteheads = ['force']
1925 1927 # ssh: return remote's addchangegroup()
1926 1928 # http: return remote's addchangegroup() or 0 for error
1927 1929 ret = remote.unbundle(cg, remoteheads, 'push')
1928 1930 else:
1929 1931 # we return an integer indicating remote head count
1930 1932 # change
1931 1933 ret = remote.addchangegroup(cg, 'push', self.url())
1932 1934
1933 1935 if ret:
1934 1936 # push succeeded, synchronize the target of the push
1935 1937 cheads = outgoing.missingheads
1936 1938 elif revs is None:
1937 1939 # All-out push failed. Synchronize on all common changesets
1938 1940 cheads = outgoing.commonheads
1939 1941 else:
1940 1942 # I want cheads = heads(::missingheads and ::commonheads)
1941 1943 # (missingheads is revs with secret changeset filtered out)
1942 1944 #
1943 1945 # This can be expressed as:
1944 1946 # cheads = ( (missingheads and ::commonheads)
1945 1947 # + (commonheads and ::missingheads)
1946 1948 # )
1947 1949 #
1948 1950 # while trying to push we already computed the following:
1949 1951 # common = (::commonheads)
1950 1952 # missing = ((commonheads::missingheads) - commonheads)
1951 1953 #
1952 1954 # We can pick:
1953 1955 # * missingheads part of common (::commonheads)
1954 1956 common = set(outgoing.common)
1955 1957 cheads = [node for node in revs if node in common]
1956 1958 # and
1957 1959 # * commonheads parents on missing
1958 1960 revset = self.set('%ln and parents(roots(%ln))',
1959 1961 outgoing.commonheads,
1960 1962 outgoing.missing)
1961 1963 cheads.extend(c.node() for c in revset)
1962 1964 # even when we don't push, exchanging phase data is useful
1963 1965 remotephases = remote.listkeys('phases')
1964 1966 if not remotephases: # old server or public only repo
1965 1967 phases.advanceboundary(self, phases.public, cheads)
1966 1968 # don't push any phase data as there is nothing to push
1967 1969 else:
1968 1970 ana = phases.analyzeremotephases(self, cheads, remotephases)
1969 1971 pheads, droots = ana
1970 1972 ### Apply remote phase on local
1971 1973 if remotephases.get('publishing', False):
1972 1974 phases.advanceboundary(self, phases.public, cheads)
1973 1975 else: # publish = False
1974 1976 phases.advanceboundary(self, phases.public, pheads)
1975 1977 phases.advanceboundary(self, phases.draft, cheads)
1976 1978 ### Apply local phase on remote
1977 1979
1978 1980 # Get the list of all revs that are draft on remote but public here.
1979 1981 # XXX Beware that the revset breaks if droots is not strictly
1980 1982 # XXX roots; we may want to ensure it is, but that is costly
1981 1983 outdated = self.set('heads((%ln::%ln) and public())',
1982 1984 droots, cheads)
1983 1985 for newremotehead in outdated:
1984 1986 r = remote.pushkey('phases',
1985 1987 newremotehead.hex(),
1986 1988 str(phases.draft),
1987 1989 str(phases.public))
1988 1990 if not r:
1989 1991 self.ui.warn(_('updating %s to public failed!\n')
1990 1992 % newremotehead)
1991 1993 self.ui.debug('try to push obsolete markers to remote\n')
1992 1994 if (obsolete._enabled and self.obsstore and
1993 1995 'obsolete' in remote.listkeys('namespaces')):
1994 1996 rslts = []
1995 1997 remotedata = self.listkeys('obsolete')
1996 1998 for key in sorted(remotedata, reverse=True):
1997 1999 # reverse sort to ensure we end with dump0
1998 2000 data = remotedata[key]
1999 2001 rslts.append(remote.pushkey('obsolete', key, '', data))
2000 2002 if [r for r in rslts if not r]:
2001 2003 msg = _('failed to push some obsolete markers!\n')
2002 2004 self.ui.warn(msg)
2003 2005 finally:
2004 2006 if lock is not None:
2005 2007 lock.release()
2006 2008 finally:
2007 2009 locallock.release()
2008 2010
2009 2011 self.ui.debug("checking for updated bookmarks\n")
2010 2012 rb = remote.listkeys('bookmarks')
2011 2013 for k in rb.keys():
2012 2014 if k in self._bookmarks:
2013 2015 nr, nl = rb[k], hex(self._bookmarks[k])
2014 2016 if nr in self:
2015 2017 cr = self[nr]
2016 2018 cl = self[nl]
2017 2019 if bookmarks.validdest(self, cr, cl):
2018 2020 r = remote.pushkey('bookmarks', k, nr, nl)
2019 2021 if r:
2020 2022 self.ui.status(_("updating bookmark %s\n") % k)
2021 2023 else:
2022 2024 self.ui.warn(_('updating bookmark %s'
2023 2025 ' failed!\n') % k)
2024 2026
2025 2027 return ret
2026 2028
2027 2029 def changegroupinfo(self, nodes, source):
2028 2030 if self.ui.verbose or source == 'bundle':
2029 2031 self.ui.status(_("%d changesets found\n") % len(nodes))
2030 2032 if self.ui.debugflag:
2031 2033 self.ui.debug("list of changesets:\n")
2032 2034 for node in nodes:
2033 2035 self.ui.debug("%s\n" % hex(node))
2034 2036
2035 2037 def changegroupsubset(self, bases, heads, source):
2036 2038 """Compute a changegroup consisting of all the nodes that are
2037 2039 descendants of any of the bases and ancestors of any of the heads.
2038 2040 Return a chunkbuffer object whose read() method will return
2039 2041 successive changegroup chunks.
2040 2042
2041 2043 It is fairly complex as determining which filenodes and which
2042 2044 manifest nodes need to be included for the changeset to be complete
2043 2045 is non-trivial.
2044 2046
2045 2047 Another wrinkle is doing the reverse, figuring out which changeset in
2046 2048 the changegroup a particular filenode or manifestnode belongs to.
2047 2049 """
2048 2050 cl = self.changelog
2049 2051 if not bases:
2050 2052 bases = [nullid]
2051 2053 csets, bases, heads = cl.nodesbetween(bases, heads)
2052 2054 # We assume that all ancestors of bases are known
2053 2055 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2054 2056 return self._changegroupsubset(common, csets, heads, source)
2055 2057
2056 2058 def getlocalbundle(self, source, outgoing):
2057 2059 """Like getbundle, but taking a discovery.outgoing as an argument.
2058 2060
2059 2061 This is only implemented for local repos and reuses potentially
2060 2062 precomputed sets in outgoing."""
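# Sketch (illustrative): with an `outgoing` object computed by
# discovery.findcommonoutgoing(repo, remote), as in push() above,
#     cg = repo.getlocalbundle('push', outgoing)
# returns None when there is nothing missing on the other side.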
2061 2063 if not outgoing.missing:
2062 2064 return None
2063 2065 return self._changegroupsubset(outgoing.common,
2064 2066 outgoing.missing,
2065 2067 outgoing.missingheads,
2066 2068 source)
2067 2069
2068 2070 def getbundle(self, source, heads=None, common=None):
2069 2071 """Like changegroupsubset, but returns the set difference between the
2070 2072 ancestors of heads and the ancestors common.
2071 2073
2072 2074 If heads is None, use the local heads. If common is None, use [nullid].
2073 2075
2074 2076 The nodes in common might not all be known locally due to the way the
2075 2077 current discovery protocol works.
2076 2078 """
2077 2079 cl = self.changelog
2078 2080 if common:
2079 2081 nm = cl.nodemap
2080 2082 common = [n for n in common if n in nm]
2081 2083 else:
2082 2084 common = [nullid]
2083 2085 if not heads:
2084 2086 heads = cl.heads()
2085 2087 return self.getlocalbundle(source,
2086 2088 discovery.outgoing(cl, common, heads))
2087 2089
2088 2090 @unfilteredmeth
2089 2091 def _changegroupsubset(self, commonrevs, csets, heads, source):
2090 2092
2091 2093 cl = self.changelog
2092 2094 mf = self.manifest
2093 2095 mfs = {} # needed manifests
2094 2096 fnodes = {} # needed file nodes
2095 2097 changedfiles = set()
2096 2098 fstate = ['', {}]
2097 2099 count = [0, 0]
2098 2100
2099 2101 # can we go through the fast path?
2100 2102 heads.sort()
2101 2103 if heads == sorted(self.heads()):
2102 2104 return self._changegroup(csets, source)
2103 2105
2104 2106 # slow path
2105 2107 self.hook('preoutgoing', throw=True, source=source)
2106 2108 self.changegroupinfo(csets, source)
2107 2109
2108 2110 # filter any nodes that claim to be part of the known set
2109 2111 def prune(revlog, missing):
2110 2112 rr, rl = revlog.rev, revlog.linkrev
2111 2113 return [n for n in missing
2112 2114 if rl(rr(n)) not in commonrevs]
2113 2115
2114 2116 progress = self.ui.progress
2115 2117 _bundling = _('bundling')
2116 2118 _changesets = _('changesets')
2117 2119 _manifests = _('manifests')
2118 2120 _files = _('files')
2119 2121
2120 2122 def lookup(revlog, x):
2121 2123 if revlog == cl:
2122 2124 c = cl.read(x)
2123 2125 changedfiles.update(c[3])
2124 2126 mfs.setdefault(c[0], x)
2125 2127 count[0] += 1
2126 2128 progress(_bundling, count[0],
2127 2129 unit=_changesets, total=count[1])
2128 2130 return x
2129 2131 elif revlog == mf:
2130 2132 clnode = mfs[x]
2131 2133 mdata = mf.readfast(x)
2132 2134 for f, n in mdata.iteritems():
2133 2135 if f in changedfiles:
2134 2136 fnodes[f].setdefault(n, clnode)
2135 2137 count[0] += 1
2136 2138 progress(_bundling, count[0],
2137 2139 unit=_manifests, total=count[1])
2138 2140 return clnode
2139 2141 else:
2140 2142 progress(_bundling, count[0], item=fstate[0],
2141 2143 unit=_files, total=count[1])
2142 2144 return fstate[1][x]
2143 2145
2144 2146 bundler = changegroup.bundle10(lookup)
2145 2147 reorder = self.ui.config('bundle', 'reorder', 'auto')
2146 2148 if reorder == 'auto':
2147 2149 reorder = None
2148 2150 else:
2149 2151 reorder = util.parsebool(reorder)
2150 2152
2151 2153 def gengroup():
2152 2154 # Create a changenode group generator that will call our functions
2153 2155 # back to lookup the owning changenode and collect information.
2154 2156 count[:] = [0, len(csets)]
2155 2157 for chunk in cl.group(csets, bundler, reorder=reorder):
2156 2158 yield chunk
2157 2159 progress(_bundling, None)
2158 2160
2159 2161 # Create a generator for the manifestnodes that calls our lookup
2160 2162 # and data collection functions back.
2161 2163 for f in changedfiles:
2162 2164 fnodes[f] = {}
2163 2165 count[:] = [0, len(mfs)]
2164 2166 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2165 2167 yield chunk
2166 2168 progress(_bundling, None)
2167 2169
2168 2170 mfs.clear()
2169 2171
2170 2172 # Go through all our files in order sorted by name.
2171 2173 count[:] = [0, len(changedfiles)]
2172 2174 for fname in sorted(changedfiles):
2173 2175 filerevlog = self.file(fname)
2174 2176 if not len(filerevlog):
2175 2177 raise util.Abort(_("empty or missing revlog for %s")
2176 2178 % fname)
2177 2179 fstate[0] = fname
2178 2180 fstate[1] = fnodes.pop(fname, {})
2179 2181
2180 2182 nodelist = prune(filerevlog, fstate[1])
2181 2183 if nodelist:
2182 2184 count[0] += 1
2183 2185 yield bundler.fileheader(fname)
2184 2186 for chunk in filerevlog.group(nodelist, bundler, reorder):
2185 2187 yield chunk
2186 2188
2187 2189 # Signal that no more groups are left.
2188 2190 yield bundler.close()
2189 2191 progress(_bundling, None)
2190 2192
2191 2193 if csets:
2192 2194 self.hook('outgoing', node=hex(csets[0]), source=source)
2193 2195
2194 2196 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2195 2197
2196 2198 def changegroup(self, basenodes, source):
2197 2199 # to avoid a race we use changegroupsubset() (issue1320)
2198 2200 return self.changegroupsubset(basenodes, self.heads(), source)
2199 2201
2200 2202 @unfilteredmeth
2201 2203 def _changegroup(self, nodes, source):
2202 2204 """Compute the changegroup of all nodes that we have that a recipient
2203 2205 doesn't. Return a chunkbuffer object whose read() method will return
2204 2206 successive changegroup chunks.
2205 2207
2206 2208 This is much easier than the previous function as we can assume that
2207 2209 the recipient has any changenode we aren't sending them.
2208 2210
2209 2211 nodes is the set of nodes to send"""
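# Note on the fast path: because the recipient is assumed to have every
# changeset we are not sending, manifest and file nodes can be mapped
# back to their changeset purely through linkrev (see lookup() below),
# which is what makes this simpler than _changegroupsubset() above.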
2210 2212
2211 2213 cl = self.changelog
2212 2214 mf = self.manifest
2213 2215 mfs = {}
2214 2216 changedfiles = set()
2215 2217 fstate = ['']
2216 2218 count = [0, 0]
2217 2219
2218 2220 self.hook('preoutgoing', throw=True, source=source)
2219 2221 self.changegroupinfo(nodes, source)
2220 2222
2221 2223 revset = set([cl.rev(n) for n in nodes])
2222 2224
2223 2225 def gennodelst(log):
2224 2226 ln, llr = log.node, log.linkrev
2225 2227 return [ln(r) for r in log if llr(r) in revset]
2226 2228
2227 2229 progress = self.ui.progress
2228 2230 _bundling = _('bundling')
2229 2231 _changesets = _('changesets')
2230 2232 _manifests = _('manifests')
2231 2233 _files = _('files')
2232 2234
2233 2235 def lookup(revlog, x):
2234 2236 if revlog == cl:
2235 2237 c = cl.read(x)
2236 2238 changedfiles.update(c[3])
2237 2239 mfs.setdefault(c[0], x)
2238 2240 count[0] += 1
2239 2241 progress(_bundling, count[0],
2240 2242 unit=_changesets, total=count[1])
2241 2243 return x
2242 2244 elif revlog == mf:
2243 2245 count[0] += 1
2244 2246 progress(_bundling, count[0],
2245 2247 unit=_manifests, total=count[1])
2246 2248 return cl.node(revlog.linkrev(revlog.rev(x)))
2247 2249 else:
2248 2250 progress(_bundling, count[0], item=fstate[0],
2249 2251 total=count[1], unit=_files)
2250 2252 return cl.node(revlog.linkrev(revlog.rev(x)))
2251 2253
2252 2254 bundler = changegroup.bundle10(lookup)
2253 2255 reorder = self.ui.config('bundle', 'reorder', 'auto')
2254 2256 if reorder == 'auto':
2255 2257 reorder = None
2256 2258 else:
2257 2259 reorder = util.parsebool(reorder)
2258 2260
2259 2261 def gengroup():
2260 2262 '''yield a sequence of changegroup chunks (strings)'''
2261 2263 # construct a list of all changed files
2262 2264
2263 2265 count[:] = [0, len(nodes)]
2264 2266 for chunk in cl.group(nodes, bundler, reorder=reorder):
2265 2267 yield chunk
2266 2268 progress(_bundling, None)
2267 2269
2268 2270 count[:] = [0, len(mfs)]
2269 2271 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2270 2272 yield chunk
2271 2273 progress(_bundling, None)
2272 2274
2273 2275 count[:] = [0, len(changedfiles)]
2274 2276 for fname in sorted(changedfiles):
2275 2277 filerevlog = self.file(fname)
2276 2278 if not len(filerevlog):
2277 2279 raise util.Abort(_("empty or missing revlog for %s")
2278 2280 % fname)
2279 2281 fstate[0] = fname
2280 2282 nodelist = gennodelst(filerevlog)
2281 2283 if nodelist:
2282 2284 count[0] += 1
2283 2285 yield bundler.fileheader(fname)
2284 2286 for chunk in filerevlog.group(nodelist, bundler, reorder):
2285 2287 yield chunk
2286 2288 yield bundler.close()
2287 2289 progress(_bundling, None)
2288 2290
2289 2291 if nodes:
2290 2292 self.hook('outgoing', node=hex(nodes[0]), source=source)
2291 2293
2292 2294 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2293 2295
2296 @unfilteredmeth
2294 2297 def addchangegroup(self, source, srctype, url, emptyok=False):
2295 2298 """Add the changegroup returned by source.read() to this repo.
2296 2299 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2297 2300 the URL of the repo where this changegroup is coming from.
2298 2301
2299 2302 Return an integer summarizing the change to this repo:
2300 2303 - nothing changed or no source: 0
2301 2304 - more heads than before: 1+added heads (2..n)
2302 2305 - fewer heads than before: -1-removed heads (-2..-n)
2303 2306 - number of heads stays the same: 1
2304 2307 """
2305 2308 def csmap(x):
2306 2309 self.ui.debug("add changeset %s\n" % short(x))
2307 2310 return len(cl)
2308 2311
2309 2312 def revmap(x):
2310 2313 return cl.rev(x)
2311 2314
2312 2315 if not source:
2313 2316 return 0
2314 2317
2315 2318 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2316 2319
2317 2320 changesets = files = revisions = 0
2318 2321 efiles = set()
2319 2322
2320 2323 # write changelog data to temp files so concurrent readers will not see
2321 2324 # an inconsistent view
2322 2325 cl = self.changelog
2323 2326 cl.delayupdate()
2324 2327 oldheads = cl.heads()
2325 2328
2326 2329 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2327 2330 try:
2328 2331 trp = weakref.proxy(tr)
2329 2332 # pull off the changeset group
2330 2333 self.ui.status(_("adding changesets\n"))
2331 2334 clstart = len(cl)
2332 2335 class prog(object):
2333 2336 step = _('changesets')
2334 2337 count = 1
2335 2338 ui = self.ui
2336 2339 total = None
2337 2340 def __call__(self):
2338 2341 self.ui.progress(self.step, self.count, unit=_('chunks'),
2339 2342 total=self.total)
2340 2343 self.count += 1
2341 2344 pr = prog()
2342 2345 source.callback = pr
2343 2346
2344 2347 source.changelogheader()
2345 2348 srccontent = cl.addgroup(source, csmap, trp)
2346 2349 if not (srccontent or emptyok):
2347 2350 raise util.Abort(_("received changelog group is empty"))
2348 2351 clend = len(cl)
2349 2352 changesets = clend - clstart
2350 2353 for c in xrange(clstart, clend):
2351 2354 efiles.update(self[c].files())
2352 2355 efiles = len(efiles)
2353 2356 self.ui.progress(_('changesets'), None)
2354 2357
2355 2358 # pull off the manifest group
2356 2359 self.ui.status(_("adding manifests\n"))
2357 2360 pr.step = _('manifests')
2358 2361 pr.count = 1
2359 2362 pr.total = changesets # manifests <= changesets
2360 2363 # no need to check for empty manifest group here:
2361 2364 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2362 2365 # no new manifest will be created and the manifest group will
2363 2366 # be empty during the pull
2364 2367 source.manifestheader()
2365 2368 self.manifest.addgroup(source, revmap, trp)
2366 2369 self.ui.progress(_('manifests'), None)
2367 2370
2368 2371 needfiles = {}
2369 2372 if self.ui.configbool('server', 'validate', default=False):
2370 2373 # validate incoming csets have their manifests
2371 2374 for cset in xrange(clstart, clend):
2372 2375 mfest = self.changelog.read(self.changelog.node(cset))[0]
2373 2376 mfest = self.manifest.readdelta(mfest)
2374 2377 # store file nodes we must see
2375 2378 for f, n in mfest.iteritems():
2376 2379 needfiles.setdefault(f, set()).add(n)
2377 2380
2378 2381 # process the files
2379 2382 self.ui.status(_("adding file changes\n"))
2380 2383 pr.step = _('files')
2381 2384 pr.count = 1
2382 2385 pr.total = efiles
2383 2386 source.callback = None
2384 2387
2385 2388 while True:
2386 2389 chunkdata = source.filelogheader()
2387 2390 if not chunkdata:
2388 2391 break
2389 2392 f = chunkdata["filename"]
2390 2393 self.ui.debug("adding %s revisions\n" % f)
2391 2394 pr()
2392 2395 fl = self.file(f)
2393 2396 o = len(fl)
2394 2397 if not fl.addgroup(source, revmap, trp):
2395 2398 raise util.Abort(_("received file revlog group is empty"))
2396 2399 revisions += len(fl) - o
2397 2400 files += 1
2398 2401 if f in needfiles:
2399 2402 needs = needfiles[f]
2400 2403 for new in xrange(o, len(fl)):
2401 2404 n = fl.node(new)
2402 2405 if n in needs:
2403 2406 needs.remove(n)
2404 2407 if not needs:
2405 2408 del needfiles[f]
2406 2409 self.ui.progress(_('files'), None)
2407 2410
2408 2411 for f, needs in needfiles.iteritems():
2409 2412 fl = self.file(f)
2410 2413 for n in needs:
2411 2414 try:
2412 2415 fl.rev(n)
2413 2416 except error.LookupError:
2414 2417 raise util.Abort(
2415 2418 _('missing file data for %s:%s - run hg verify') %
2416 2419 (f, hex(n)))
2417 2420
2418 2421 dh = 0
2419 2422 if oldheads:
2420 2423 heads = cl.heads()
2421 2424 dh = len(heads) - len(oldheads)
2422 2425 for h in heads:
2423 2426 if h not in oldheads and self[h].closesbranch():
2424 2427 dh -= 1
2425 2428 htext = ""
2426 2429 if dh:
2427 2430 htext = _(" (%+d heads)") % dh
2428 2431
2429 2432 self.ui.status(_("added %d changesets"
2430 2433 " with %d changes to %d files%s\n")
2431 2434 % (changesets, revisions, files, htext))
2432 2435 obsolete.clearobscaches(self)
2433 2436
2434 2437 if changesets > 0:
2435 2438 p = lambda: cl.writepending() and self.root or ""
2436 2439 self.hook('pretxnchangegroup', throw=True,
2437 2440 node=hex(cl.node(clstart)), source=srctype,
2438 2441 url=url, pending=p)
2439 2442
2440 2443 added = [cl.node(r) for r in xrange(clstart, clend)]
2441 2444 publishing = self.ui.configbool('phases', 'publish', True)
2442 2445 if srctype == 'push':
2443 2446 # Old servers can not push the boundary themselves.
2444 2447 # New servers won't push the boundary if the changeset already
2445 2448 # existed locally as secret
2446 2449 #
2447 2450 # We should not use added here but the list of all changes in
2448 2451 # the bundle
2449 2452 if publishing:
2450 2453 phases.advanceboundary(self, phases.public, srccontent)
2451 2454 else:
2452 2455 phases.advanceboundary(self, phases.draft, srccontent)
2453 2456 phases.retractboundary(self, phases.draft, added)
2454 2457 elif srctype != 'strip':
2455 2458 # publishing only alters behavior during push
2456 2459 #
2457 2460 # strip should not touch boundary at all
2458 2461 phases.retractboundary(self, phases.draft, added)
2459 2462
2460 2463 # make changelog see real files again
2461 2464 cl.finalize(trp)
2462 2465
2463 2466 tr.close()
2464 2467
2465 2468 if changesets > 0:
2466 2469 self.updatebranchcache()
2467 2470 def runhooks():
2468 2471 # forcefully update the on-disk branch cache
2469 2472 self.ui.debug("updating the branch cache\n")
2470 2473 self.hook("changegroup", node=hex(cl.node(clstart)),
2471 2474 source=srctype, url=url)
2472 2475
2473 2476 for n in added:
2474 2477 self.hook("incoming", node=hex(n), source=srctype,
2475 2478 url=url)
2476 2479 self._afterlock(runhooks)
2477 2480
2478 2481 finally:
2479 2482 tr.release()
2480 2483 # never return 0 here:
2481 2484 if dh < 0:
2482 2485 return dh - 1
2483 2486 else:
2484 2487 return dh + 1
2485 2488
2486 2489 def stream_in(self, remote, requirements):
2487 2490 lock = self.lock()
2488 2491 try:
2489 2492 # Save remote branchmap. We will use it later
2490 2493 # to speed up branchcache creation
2491 2494 rbranchmap = None
2492 2495 if remote.capable("branchmap"):
2493 2496 rbranchmap = remote.branchmap()
2494 2497
2495 2498 fp = remote.stream_out()
2496 2499 l = fp.readline()
2497 2500 try:
2498 2501 resp = int(l)
2499 2502 except ValueError:
2500 2503 raise error.ResponseError(
2501 2504 _('unexpected response from remote server:'), l)
2502 2505 if resp == 1:
2503 2506 raise util.Abort(_('operation forbidden by server'))
2504 2507 elif resp == 2:
2505 2508 raise util.Abort(_('locking the remote repository failed'))
2506 2509 elif resp != 0:
2507 2510 raise util.Abort(_('the server sent an unknown error code'))
2508 2511 self.ui.status(_('streaming all changes\n'))
2509 2512 l = fp.readline()
2510 2513 try:
2511 2514 total_files, total_bytes = map(int, l.split(' ', 1))
2512 2515 except (ValueError, TypeError):
2513 2516 raise error.ResponseError(
2514 2517 _('unexpected response from remote server:'), l)
2515 2518 self.ui.status(_('%d files to transfer, %s of data\n') %
2516 2519 (total_files, util.bytecount(total_bytes)))
2517 2520 handled_bytes = 0
2518 2521 self.ui.progress(_('clone'), 0, total=total_bytes)
2519 2522 start = time.time()
2520 2523 for i in xrange(total_files):
2521 2524 # XXX doesn't support '\n' or '\r' in filenames
2522 2525 l = fp.readline()
2523 2526 try:
2524 2527 name, size = l.split('\0', 1)
2525 2528 size = int(size)
2526 2529 except (ValueError, TypeError):
2527 2530 raise error.ResponseError(
2528 2531 _('unexpected response from remote server:'), l)
2529 2532 if self.ui.debugflag:
2530 2533 self.ui.debug('adding %s (%s)\n' %
2531 2534 (name, util.bytecount(size)))
2532 2535 # for backwards compat, name was partially encoded
2533 2536 ofp = self.sopener(store.decodedir(name), 'w')
2534 2537 for chunk in util.filechunkiter(fp, limit=size):
2535 2538 handled_bytes += len(chunk)
2536 2539 self.ui.progress(_('clone'), handled_bytes,
2537 2540 total=total_bytes)
2538 2541 ofp.write(chunk)
2539 2542 ofp.close()
2540 2543 elapsed = time.time() - start
2541 2544 if elapsed <= 0:
2542 2545 elapsed = 0.001
2543 2546 self.ui.progress(_('clone'), None)
2544 2547 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2545 2548 (util.bytecount(total_bytes), elapsed,
2546 2549 util.bytecount(total_bytes / elapsed)))
2547 2550
2548 2551 # new requirements = old non-format requirements +
2549 2552 # new format-related
2550 2553 # requirements from the streamed-in repository
2551 2554 requirements.update(set(self.requirements) - self.supportedformats)
2552 2555 self._applyrequirements(requirements)
2553 2556 self._writerequirements()
2554 2557
2555 2558 if rbranchmap:
2556 2559 rbheads = []
2557 2560 for bheads in rbranchmap.itervalues():
2558 2561 rbheads.extend(bheads)
2559 2562
2560 2563 self.branchcache = rbranchmap
2561 2564 if rbheads:
2562 2565 rtiprev = max((int(self.changelog.rev(node))
2563 2566 for node in rbheads))
2564 2567 self._writebranchcache(self.branchcache,
2565 2568 self[rtiprev].node(), rtiprev)
2566 2569 self.invalidate()
2567 2570 return len(self.heads()) + 1
2568 2571 finally:
2569 2572 lock.release()
2570 2573
2571 2574 def clone(self, remote, heads=[], stream=False):
2572 2575 '''clone remote repository.
2573 2576
2574 2577 keyword arguments:
2575 2578 heads: list of revs to clone (forces use of pull)
2576 2579 stream: use streaming clone if possible'''
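# Usage sketch (illustrative; `remote` is a peer object obtained by the
# caller, e.g. through mercurial.hg.peer):
#     repo.clone(remote, stream=True)
# streams the store when the server allows it and no specific heads
# were requested, otherwise falls back to a regular pull(remote, heads).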
2577 2580
2578 2581 # now, all clients that can request uncompressed clones can
2579 2582 # read repo formats supported by all servers that can serve
2580 2583 # them.
2581 2584
2582 2585 # if revlog format changes, client will have to check version
2583 2586 # and format flags on "stream" capability, and use
2584 2587 # uncompressed only if compatible.
2585 2588
2586 2589 if not stream:
2587 2590 # if the server explicitly prefers to stream (for fast LANs)
2588 2591 stream = remote.capable('stream-preferred')
2589 2592
2590 2593 if stream and not heads:
2591 2594 # 'stream' means remote revlog format is revlogv1 only
2592 2595 if remote.capable('stream'):
2593 2596 return self.stream_in(remote, set(('revlogv1',)))
2594 2597 # otherwise, 'streamreqs' contains the remote revlog format
2595 2598 streamreqs = remote.capable('streamreqs')
2596 2599 if streamreqs:
2597 2600 streamreqs = set(streamreqs.split(','))
2598 2601 # if we support it, stream in and adjust our requirements
2599 2602 if not streamreqs - self.supportedformats:
2600 2603 return self.stream_in(remote, streamreqs)
2601 2604 return self.pull(remote, heads)
2602 2605
2603 2606 def pushkey(self, namespace, key, old, new):
2604 2607 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2605 2608 old=old, new=new)
2606 2609 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2607 2610 ret = pushkey.push(self, namespace, key, old, new)
2608 2611 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2609 2612 ret=ret)
2610 2613 return ret
2611 2614
2612 2615 def listkeys(self, namespace):
2613 2616 self.hook('prelistkeys', throw=True, namespace=namespace)
2614 2617 self.ui.debug('listing keys for "%s"\n' % namespace)
2615 2618 values = pushkey.list(self, namespace)
2616 2619 self.hook('listkeys', namespace=namespace, values=values)
2617 2620 return values
2618 2621
2619 2622 def debugwireargs(self, one, two, three=None, four=None, five=None):
2620 2623 '''used to test argument passing over the wire'''
2621 2624 return "%s %s %s %s %s" % (one, two, three, four, five)
2622 2625
2623 2626 def savecommitmessage(self, text):
2624 2627 fp = self.opener('last-message.txt', 'wb')
2625 2628 try:
2626 2629 fp.write(text)
2627 2630 finally:
2628 2631 fp.close()
2629 2632 return self.pathto(fp.name[len(self.root)+1:])
2630 2633
2631 2634 # used to avoid circular references so destructors work
2632 2635 def aftertrans(files):
2633 2636 renamefiles = [tuple(t) for t in files]
2634 2637 def a():
2635 2638 for src, dest in renamefiles:
2636 2639 try:
2637 2640 util.rename(src, dest)
2638 2641 except OSError: # journal file does not yet exist
2639 2642 pass
2640 2643 return a
2641 2644
2642 2645 def undoname(fn):
2643 2646 base, name = os.path.split(fn)
2644 2647 assert name.startswith('journal')
2645 2648 return os.path.join(base, name.replace('journal', 'undo', 1))
2646 2649
2647 2650 def instance(ui, path, create):
2648 2651 return localrepository(ui, util.urllocalpath(path), create)
2649 2652
2650 2653 def islocal(path):
2651 2654 return True