localrepo: introduce destroying function
Idan Kamara
r18310:4499ba5a default
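This changeset adds a destroying() hook to localrepository as a counterpart to the existing destroyed() hook: operations that are about to remove nodes (strip, rollback) call it first, giving a single place to flush state that is sitting in memory before the repository gets invalidated. The base implementation is a no-op; in this changeset only _rollback() gains the call, invoking destroying() before transaction.rollback() and destroyed() afterwards.

A minimal sketch of the intended call pattern; strip_like_operation and removenodes are hypothetical names used only for illustration, not part of this change:

    def strip_like_operation(repo, nodes):
        # mirrors the ordering _rollback() uses in this changeset
        lock = repo.lock()
        try:
            repo.destroying()           # flush pending in-memory state first
            removenodes(repo, nodes)    # hypothetical helper that rewrites history
            repo.destroyed()            # let caches recover after the destruction
        finally:
            lock.release()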
@@ -1,2562 +1,2577 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if an repo and a unfilteredproperty cached value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo.filtered('unserved')
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return self._repo.branchmap()
95 95
96 96 def heads(self):
97 97 return self._repo.heads()
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common)
104 104
105 105 # TODO We might want to move the next two calls into legacypeer and add
106 106 # unbundle instead.
107 107
108 108 def lock(self):
109 109 return self._repo.lock()
110 110
111 111 def addchangegroup(self, cg, source, url):
112 112 return self._repo.addchangegroup(cg, source, url)
113 113
114 114 def pushkey(self, namespace, key, old, new):
115 115 return self._repo.pushkey(namespace, key, old, new)
116 116
117 117 def listkeys(self, namespace):
118 118 return self._repo.listkeys(namespace)
119 119
120 120 def debugwireargs(self, one, two, three=None, four=None, five=None):
121 121 '''used to test argument passing over the wire'''
122 122 return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
124 124 class locallegacypeer(localpeer):
125 125 '''peer extension which implements legacy methods too; used for tests with
126 126 restricted capabilities'''
127 127
128 128 def __init__(self, repo):
129 129 localpeer.__init__(self, repo, caps=LEGACYCAPS)
130 130
131 131 def branches(self, nodes):
132 132 return self._repo.branches(nodes)
133 133
134 134 def between(self, pairs):
135 135 return self._repo.between(pairs)
136 136
137 137 def changegroup(self, basenodes, source):
138 138 return self._repo.changegroup(basenodes, source)
139 139
140 140 def changegroupsubset(self, bases, heads, source):
141 141 return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
145 145 supportedformats = set(('revlogv1', 'generaldelta'))
146 146 supported = supportedformats | set(('store', 'fncache', 'shared',
147 147 'dotencode'))
148 148 openerreqs = set(('revlogv1', 'generaldelta'))
149 149 requirements = ['revlogv1']
150 150 filtername = None
151 151
152 152 def _baserequirements(self, create):
153 153 return self.requirements[:]
154 154
155 155 def __init__(self, baseui, path=None, create=False):
156 156 self.wvfs = scmutil.vfs(path, expand=True)
157 157 self.wopener = self.wvfs
158 158 self.root = self.wvfs.base
159 159 self.path = self.wvfs.join(".hg")
160 160 self.origroot = path
161 161 self.auditor = scmutil.pathauditor(self.root, self._checknested)
162 162 self.vfs = scmutil.vfs(self.path)
163 163 self.opener = self.vfs
164 164 self.baseui = baseui
165 165 self.ui = baseui.copy()
166 166 # A list of callbacks to shape the phase if no data were found.
167 167 # Callbacks are in the form: func(repo, roots) --> processed root.
168 168 # This list is to be filled by extensions during repo setup
169 169 self._phasedefaults = []
170 170 try:
171 171 self.ui.readconfig(self.join("hgrc"), self.root)
172 172 extensions.loadall(self.ui)
173 173 except IOError:
174 174 pass
175 175
176 176 if not self.vfs.isdir():
177 177 if create:
178 178 if not self.wvfs.exists():
179 179 self.wvfs.makedirs()
180 180 self.vfs.makedir(notindexed=True)
181 181 requirements = self._baserequirements(create)
182 182 if self.ui.configbool('format', 'usestore', True):
183 183 self.vfs.mkdir("store")
184 184 requirements.append("store")
185 185 if self.ui.configbool('format', 'usefncache', True):
186 186 requirements.append("fncache")
187 187 if self.ui.configbool('format', 'dotencode', True):
188 188 requirements.append('dotencode')
189 189 # create an invalid changelog
190 190 self.vfs.append(
191 191 "00changelog.i",
192 192 '\0\0\0\2' # represents revlogv2
193 193 ' dummy changelog to prevent using the old repo layout'
194 194 )
195 195 if self.ui.configbool('format', 'generaldelta', False):
196 196 requirements.append("generaldelta")
197 197 requirements = set(requirements)
198 198 else:
199 199 raise error.RepoError(_("repository %s not found") % path)
200 200 elif create:
201 201 raise error.RepoError(_("repository %s already exists") % path)
202 202 else:
203 203 try:
204 204 requirements = scmutil.readrequires(self.vfs, self.supported)
205 205 except IOError, inst:
206 206 if inst.errno != errno.ENOENT:
207 207 raise
208 208 requirements = set()
209 209
210 210 self.sharedpath = self.path
211 211 try:
212 212 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
213 213 if not os.path.exists(s):
214 214 raise error.RepoError(
215 215 _('.hg/sharedpath points to nonexistent directory %s') % s)
216 216 self.sharedpath = s
217 217 except IOError, inst:
218 218 if inst.errno != errno.ENOENT:
219 219 raise
220 220
221 221 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
222 222 self.spath = self.store.path
223 223 self.svfs = self.store.vfs
224 224 self.sopener = self.svfs
225 225 self.sjoin = self.store.join
226 226 self.vfs.createmode = self.store.createmode
227 227 self._applyrequirements(requirements)
228 228 if create:
229 229 self._writerequirements()
230 230
231 231
232 232 self._branchcaches = {}
233 233 self.filterpats = {}
234 234 self._datafilters = {}
235 235 self._transref = self._lockref = self._wlockref = None
236 236
237 237 # A cache for various files under .hg/ that tracks file changes,
238 238 # (used by the filecache decorator)
239 239 #
240 240 # Maps a property name to its util.filecacheentry
241 241 self._filecache = {}
242 242
243 243 # hold sets of revision to be filtered
244 244 # should be cleared when something might have changed the filter value:
245 245 # - new changesets,
246 246 # - phase change,
247 247 # - new obsolescence marker,
248 248 # - working directory parent change,
249 249 # - bookmark changes
250 250 self.filteredrevcache = {}
251 251
252 252 def close(self):
253 253 pass
254 254
255 255 def _restrictcapabilities(self, caps):
256 256 return caps
257 257
258 258 def _applyrequirements(self, requirements):
259 259 self.requirements = requirements
260 260 self.sopener.options = dict((r, 1) for r in requirements
261 261 if r in self.openerreqs)
262 262
263 263 def _writerequirements(self):
264 264 reqfile = self.opener("requires", "w")
265 265 for r in self.requirements:
266 266 reqfile.write("%s\n" % r)
267 267 reqfile.close()
268 268
269 269 def _checknested(self, path):
270 270 """Determine if path is a legal nested repository."""
271 271 if not path.startswith(self.root):
272 272 return False
273 273 subpath = path[len(self.root) + 1:]
274 274 normsubpath = util.pconvert(subpath)
275 275
276 276 # XXX: Checking against the current working copy is wrong in
277 277 # the sense that it can reject things like
278 278 #
279 279 # $ hg cat -r 10 sub/x.txt
280 280 #
281 281 # if sub/ is no longer a subrepository in the working copy
282 282 # parent revision.
283 283 #
284 284 # However, it can of course also allow things that would have
285 285 # been rejected before, such as the above cat command if sub/
286 286 # is a subrepository now, but was a normal directory before.
287 287 # The old path auditor would have rejected by mistake since it
288 288 # panics when it sees sub/.hg/.
289 289 #
290 290 # All in all, checking against the working copy seems sensible
291 291 # since we want to prevent access to nested repositories on
292 292 # the filesystem *now*.
293 293 ctx = self[None]
294 294 parts = util.splitpath(subpath)
295 295 while parts:
296 296 prefix = '/'.join(parts)
297 297 if prefix in ctx.substate:
298 298 if prefix == normsubpath:
299 299 return True
300 300 else:
301 301 sub = ctx.sub(prefix)
302 302 return sub.checknested(subpath[len(prefix) + 1:])
303 303 else:
304 304 parts.pop()
305 305 return False
306 306
307 307 def peer(self):
308 308 return localpeer(self) # not cached to avoid reference cycle
309 309
310 310 def unfiltered(self):
311 311 """Return unfiltered version of the repository
312 312
313 313 Intended to be overwritten by filtered repo."""
314 314 return self
315 315
316 316 def filtered(self, name):
317 317 """Return a filtered version of a repository"""
318 318 # build a new class with the mixin and the current class
319 319 # (possibly a subclass of the repo)
320 320 class proxycls(repoview.repoview, self.unfiltered().__class__):
321 321 pass
322 322 return proxycls(self, name)
323 323
324 324 @repofilecache('bookmarks')
325 325 def _bookmarks(self):
326 326 return bookmarks.bmstore(self)
327 327
328 328 @repofilecache('bookmarks.current')
329 329 def _bookmarkcurrent(self):
330 330 return bookmarks.readcurrent(self)
331 331
332 332 def bookmarkheads(self, bookmark):
333 333 name = bookmark.split('@', 1)[0]
334 334 heads = []
335 335 for mark, n in self._bookmarks.iteritems():
336 336 if mark.split('@', 1)[0] == name:
337 337 heads.append(n)
338 338 return heads
339 339
340 340 @storecache('phaseroots')
341 341 def _phasecache(self):
342 342 return phases.phasecache(self, self._phasedefaults)
343 343
344 344 @storecache('obsstore')
345 345 def obsstore(self):
346 346 store = obsolete.obsstore(self.sopener)
347 347 if store and not obsolete._enabled:
348 348 # message is rare enough to not be translated
349 349 msg = 'obsolete feature not enabled but %i markers found!\n'
350 350 self.ui.warn(msg % len(list(store)))
351 351 return store
352 352
353 353 @storecache('00changelog.i')
354 354 def changelog(self):
355 355 c = changelog.changelog(self.sopener)
356 356 if 'HG_PENDING' in os.environ:
357 357 p = os.environ['HG_PENDING']
358 358 if p.startswith(self.root):
359 359 c.readpending('00changelog.i.a')
360 360 return c
361 361
362 362 @storecache('00manifest.i')
363 363 def manifest(self):
364 364 return manifest.manifest(self.sopener)
365 365
366 366 @repofilecache('dirstate')
367 367 def dirstate(self):
368 368 warned = [0]
369 369 def validate(node):
370 370 try:
371 371 self.changelog.rev(node)
372 372 return node
373 373 except error.LookupError:
374 374 if not warned[0]:
375 375 warned[0] = True
376 376 self.ui.warn(_("warning: ignoring unknown"
377 377 " working parent %s!\n") % short(node))
378 378 return nullid
379 379
380 380 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
381 381
382 382 def __getitem__(self, changeid):
383 383 if changeid is None:
384 384 return context.workingctx(self)
385 385 return context.changectx(self, changeid)
386 386
387 387 def __contains__(self, changeid):
388 388 try:
389 389 return bool(self.lookup(changeid))
390 390 except error.RepoLookupError:
391 391 return False
392 392
393 393 def __nonzero__(self):
394 394 return True
395 395
396 396 def __len__(self):
397 397 return len(self.changelog)
398 398
399 399 def __iter__(self):
400 400 return iter(self.changelog)
401 401
402 402 def revs(self, expr, *args):
403 403 '''Return a list of revisions matching the given revset'''
404 404 expr = revset.formatspec(expr, *args)
405 405 m = revset.match(None, expr)
406 406 return [r for r in m(self, list(self))]
407 407
408 408 def set(self, expr, *args):
409 409 '''
410 410 Yield a context for each matching revision, after doing arg
411 411 replacement via revset.formatspec
412 412 '''
413 413 for r in self.revs(expr, *args):
414 414 yield self[r]
415 415
416 416 def url(self):
417 417 return 'file:' + self.root
418 418
419 419 def hook(self, name, throw=False, **args):
420 420 return hook.hook(self.ui, self, name, throw, **args)
421 421
422 422 @unfilteredmethod
423 423 def _tag(self, names, node, message, local, user, date, extra={}):
424 424 if isinstance(names, str):
425 425 names = (names,)
426 426
427 427 branches = self.branchmap()
428 428 for name in names:
429 429 self.hook('pretag', throw=True, node=hex(node), tag=name,
430 430 local=local)
431 431 if name in branches:
432 432 self.ui.warn(_("warning: tag %s conflicts with existing"
433 433 " branch name\n") % name)
434 434
435 435 def writetags(fp, names, munge, prevtags):
436 436 fp.seek(0, 2)
437 437 if prevtags and prevtags[-1] != '\n':
438 438 fp.write('\n')
439 439 for name in names:
440 440 m = munge and munge(name) or name
441 441 if (self._tagscache.tagtypes and
442 442 name in self._tagscache.tagtypes):
443 443 old = self.tags().get(name, nullid)
444 444 fp.write('%s %s\n' % (hex(old), m))
445 445 fp.write('%s %s\n' % (hex(node), m))
446 446 fp.close()
447 447
448 448 prevtags = ''
449 449 if local:
450 450 try:
451 451 fp = self.opener('localtags', 'r+')
452 452 except IOError:
453 453 fp = self.opener('localtags', 'a')
454 454 else:
455 455 prevtags = fp.read()
456 456
457 457 # local tags are stored in the current charset
458 458 writetags(fp, names, None, prevtags)
459 459 for name in names:
460 460 self.hook('tag', node=hex(node), tag=name, local=local)
461 461 return
462 462
463 463 try:
464 464 fp = self.wfile('.hgtags', 'rb+')
465 465 except IOError, e:
466 466 if e.errno != errno.ENOENT:
467 467 raise
468 468 fp = self.wfile('.hgtags', 'ab')
469 469 else:
470 470 prevtags = fp.read()
471 471
472 472 # committed tags are stored in UTF-8
473 473 writetags(fp, names, encoding.fromlocal, prevtags)
474 474
475 475 fp.close()
476 476
477 477 self.invalidatecaches()
478 478
479 479 if '.hgtags' not in self.dirstate:
480 480 self[None].add(['.hgtags'])
481 481
482 482 m = matchmod.exact(self.root, '', ['.hgtags'])
483 483 tagnode = self.commit(message, user, date, extra=extra, match=m)
484 484
485 485 for name in names:
486 486 self.hook('tag', node=hex(node), tag=name, local=local)
487 487
488 488 return tagnode
489 489
490 490 def tag(self, names, node, message, local, user, date):
491 491 '''tag a revision with one or more symbolic names.
492 492
493 493 names is a list of strings or, when adding a single tag, names may be a
494 494 string.
495 495
496 496 if local is True, the tags are stored in a per-repository file.
497 497 otherwise, they are stored in the .hgtags file, and a new
498 498 changeset is committed with the change.
499 499
500 500 keyword arguments:
501 501
502 502 local: whether to store tags in non-version-controlled file
503 503 (default False)
504 504
505 505 message: commit message to use if committing
506 506
507 507 user: name of user to use if committing
508 508
509 509 date: date tuple to use if committing'''
510 510
511 511 if not local:
512 512 for x in self.status()[:5]:
513 513 if '.hgtags' in x:
514 514 raise util.Abort(_('working copy of .hgtags is changed '
515 515 '(please commit .hgtags manually)'))
516 516
517 517 self.tags() # instantiate the cache
518 518 self._tag(names, node, message, local, user, date)
519 519
520 520 @filteredpropertycache
521 521 def _tagscache(self):
522 522 '''Returns a tagscache object that contains various tags related
523 523 caches.'''
524 524
525 525 # This simplifies its cache management by having one decorated
526 526 # function (this one) and the rest simply fetch things from it.
527 527 class tagscache(object):
528 528 def __init__(self):
529 529 # These two define the set of tags for this repository. tags
530 530 # maps tag name to node; tagtypes maps tag name to 'global' or
531 531 # 'local'. (Global tags are defined by .hgtags across all
532 532 # heads, and local tags are defined in .hg/localtags.)
533 533 # They constitute the in-memory cache of tags.
534 534 self.tags = self.tagtypes = None
535 535
536 536 self.nodetagscache = self.tagslist = None
537 537
538 538 cache = tagscache()
539 539 cache.tags, cache.tagtypes = self._findtags()
540 540
541 541 return cache
542 542
543 543 def tags(self):
544 544 '''return a mapping of tag to node'''
545 545 t = {}
546 546 if self.changelog.filteredrevs:
547 547 tags, tt = self._findtags()
548 548 else:
549 549 tags = self._tagscache.tags
550 550 for k, v in tags.iteritems():
551 551 try:
552 552 # ignore tags to unknown nodes
553 553 self.changelog.rev(v)
554 554 t[k] = v
555 555 except (error.LookupError, ValueError):
556 556 pass
557 557 return t
558 558
559 559 def _findtags(self):
560 560 '''Do the hard work of finding tags. Return a pair of dicts
561 561 (tags, tagtypes) where tags maps tag name to node, and tagtypes
562 562 maps tag name to a string like \'global\' or \'local\'.
563 563 Subclasses or extensions are free to add their own tags, but
564 564 should be aware that the returned dicts will be retained for the
565 565 duration of the localrepo object.'''
566 566
567 567 # XXX what tagtype should subclasses/extensions use? Currently
568 568 # mq and bookmarks add tags, but do not set the tagtype at all.
569 569 # Should each extension invent its own tag type? Should there
570 570 # be one tagtype for all such "virtual" tags? Or is the status
571 571 # quo fine?
572 572
573 573 alltags = {} # map tag name to (node, hist)
574 574 tagtypes = {}
575 575
576 576 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
577 577 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
578 578
579 579 # Build the return dicts. Have to re-encode tag names because
580 580 # the tags module always uses UTF-8 (in order not to lose info
581 581 # writing to the cache), but the rest of Mercurial wants them in
582 582 # local encoding.
583 583 tags = {}
584 584 for (name, (node, hist)) in alltags.iteritems():
585 585 if node != nullid:
586 586 tags[encoding.tolocal(name)] = node
587 587 tags['tip'] = self.changelog.tip()
588 588 tagtypes = dict([(encoding.tolocal(name), value)
589 589 for (name, value) in tagtypes.iteritems()])
590 590 return (tags, tagtypes)
591 591
592 592 def tagtype(self, tagname):
593 593 '''
594 594 return the type of the given tag. result can be:
595 595
596 596 'local' : a local tag
597 597 'global' : a global tag
598 598 None : tag does not exist
599 599 '''
600 600
601 601 return self._tagscache.tagtypes.get(tagname)
602 602
603 603 def tagslist(self):
604 604 '''return a list of tags ordered by revision'''
605 605 if not self._tagscache.tagslist:
606 606 l = []
607 607 for t, n in self.tags().iteritems():
608 608 r = self.changelog.rev(n)
609 609 l.append((r, t, n))
610 610 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
611 611
612 612 return self._tagscache.tagslist
613 613
614 614 def nodetags(self, node):
615 615 '''return the tags associated with a node'''
616 616 if not self._tagscache.nodetagscache:
617 617 nodetagscache = {}
618 618 for t, n in self._tagscache.tags.iteritems():
619 619 nodetagscache.setdefault(n, []).append(t)
620 620 for tags in nodetagscache.itervalues():
621 621 tags.sort()
622 622 self._tagscache.nodetagscache = nodetagscache
623 623 return self._tagscache.nodetagscache.get(node, [])
624 624
625 625 def nodebookmarks(self, node):
626 626 marks = []
627 627 for bookmark, n in self._bookmarks.iteritems():
628 628 if n == node:
629 629 marks.append(bookmark)
630 630 return sorted(marks)
631 631
632 632 def branchmap(self):
633 633 '''returns a dictionary {branch: [branchheads]}'''
634 634 branchmap.updatecache(self)
635 635 return self._branchcaches[self.filtername]
636 636
637 637
638 638 def _branchtip(self, heads):
639 639 '''return the tipmost branch head in heads'''
640 640 tip = heads[-1]
641 641 for h in reversed(heads):
642 642 if not self[h].closesbranch():
643 643 tip = h
644 644 break
645 645 return tip
646 646
647 647 def branchtip(self, branch):
648 648 '''return the tip node for a given branch'''
649 649 if branch not in self.branchmap():
650 650 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
651 651 return self._branchtip(self.branchmap()[branch])
652 652
653 653 def branchtags(self):
654 654 '''return a dict where branch names map to the tipmost head of
655 655 the branch, open heads come before closed'''
656 656 bt = {}
657 657 for bn, heads in self.branchmap().iteritems():
658 658 bt[bn] = self._branchtip(heads)
659 659 return bt
660 660
661 661 def lookup(self, key):
662 662 return self[key].node()
663 663
664 664 def lookupbranch(self, key, remote=None):
665 665 repo = remote or self
666 666 if key in repo.branchmap():
667 667 return key
668 668
669 669 repo = (remote and remote.local()) and remote or self
670 670 return repo[key].branch()
671 671
672 672 def known(self, nodes):
673 673 nm = self.changelog.nodemap
674 674 pc = self._phasecache
675 675 result = []
676 676 for n in nodes:
677 677 r = nm.get(n)
678 678 resp = not (r is None or pc.phase(self, r) >= phases.secret)
679 679 result.append(resp)
680 680 return result
681 681
682 682 def local(self):
683 683 return self
684 684
685 685 def cancopy(self):
686 686 return self.local() # so statichttprepo's override of local() works
687 687
688 688 def join(self, f):
689 689 return os.path.join(self.path, f)
690 690
691 691 def wjoin(self, f):
692 692 return os.path.join(self.root, f)
693 693
694 694 def file(self, f):
695 695 if f[0] == '/':
696 696 f = f[1:]
697 697 return filelog.filelog(self.sopener, f)
698 698
699 699 def changectx(self, changeid):
700 700 return self[changeid]
701 701
702 702 def parents(self, changeid=None):
703 703 '''get list of changectxs for parents of changeid'''
704 704 return self[changeid].parents()
705 705
706 706 def setparents(self, p1, p2=nullid):
707 707 copies = self.dirstate.setparents(p1, p2)
708 708 if copies:
709 709 # Adjust copy records, the dirstate cannot do it, it
710 710 # requires access to parents manifests. Preserve them
711 711 # only for entries added to first parent.
712 712 pctx = self[p1]
713 713 for f in copies:
714 714 if f not in pctx and copies[f] in pctx:
715 715 self.dirstate.copy(copies[f], f)
716 716
717 717 def filectx(self, path, changeid=None, fileid=None):
718 718 """changeid can be a changeset revision, node, or tag.
719 719 fileid can be a file revision or node."""
720 720 return context.filectx(self, path, changeid, fileid)
721 721
722 722 def getcwd(self):
723 723 return self.dirstate.getcwd()
724 724
725 725 def pathto(self, f, cwd=None):
726 726 return self.dirstate.pathto(f, cwd)
727 727
728 728 def wfile(self, f, mode='r'):
729 729 return self.wopener(f, mode)
730 730
731 731 def _link(self, f):
732 732 return os.path.islink(self.wjoin(f))
733 733
734 734 def _loadfilter(self, filter):
735 735 if filter not in self.filterpats:
736 736 l = []
737 737 for pat, cmd in self.ui.configitems(filter):
738 738 if cmd == '!':
739 739 continue
740 740 mf = matchmod.match(self.root, '', [pat])
741 741 fn = None
742 742 params = cmd
743 743 for name, filterfn in self._datafilters.iteritems():
744 744 if cmd.startswith(name):
745 745 fn = filterfn
746 746 params = cmd[len(name):].lstrip()
747 747 break
748 748 if not fn:
749 749 fn = lambda s, c, **kwargs: util.filter(s, c)
750 750 # Wrap old filters not supporting keyword arguments
751 751 if not inspect.getargspec(fn)[2]:
752 752 oldfn = fn
753 753 fn = lambda s, c, **kwargs: oldfn(s, c)
754 754 l.append((mf, fn, params))
755 755 self.filterpats[filter] = l
756 756 return self.filterpats[filter]
757 757
758 758 def _filter(self, filterpats, filename, data):
759 759 for mf, fn, cmd in filterpats:
760 760 if mf(filename):
761 761 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
762 762 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
763 763 break
764 764
765 765 return data
766 766
767 767 @unfilteredpropertycache
768 768 def _encodefilterpats(self):
769 769 return self._loadfilter('encode')
770 770
771 771 @unfilteredpropertycache
772 772 def _decodefilterpats(self):
773 773 return self._loadfilter('decode')
774 774
775 775 def adddatafilter(self, name, filter):
776 776 self._datafilters[name] = filter
777 777
778 778 def wread(self, filename):
779 779 if self._link(filename):
780 780 data = os.readlink(self.wjoin(filename))
781 781 else:
782 782 data = self.wopener.read(filename)
783 783 return self._filter(self._encodefilterpats, filename, data)
784 784
785 785 def wwrite(self, filename, data, flags):
786 786 data = self._filter(self._decodefilterpats, filename, data)
787 787 if 'l' in flags:
788 788 self.wopener.symlink(data, filename)
789 789 else:
790 790 self.wopener.write(filename, data)
791 791 if 'x' in flags:
792 792 util.setflags(self.wjoin(filename), False, True)
793 793
794 794 def wwritedata(self, filename, data):
795 795 return self._filter(self._decodefilterpats, filename, data)
796 796
797 797 def transaction(self, desc):
798 798 tr = self._transref and self._transref() or None
799 799 if tr and tr.running():
800 800 return tr.nest()
801 801
802 802 # abort here if the journal already exists
803 803 if os.path.exists(self.sjoin("journal")):
804 804 raise error.RepoError(
805 805 _("abandoned transaction found - run hg recover"))
806 806
807 807 self._writejournal(desc)
808 808 renames = [(x, undoname(x)) for x in self._journalfiles()]
809 809
810 810 tr = transaction.transaction(self.ui.warn, self.sopener,
811 811 self.sjoin("journal"),
812 812 aftertrans(renames),
813 813 self.store.createmode)
814 814 self._transref = weakref.ref(tr)
815 815 return tr
816 816
817 817 def _journalfiles(self):
818 818 return (self.sjoin('journal'), self.join('journal.dirstate'),
819 819 self.join('journal.branch'), self.join('journal.desc'),
820 820 self.join('journal.bookmarks'),
821 821 self.sjoin('journal.phaseroots'))
822 822
823 823 def undofiles(self):
824 824 return [undoname(x) for x in self._journalfiles()]
825 825
826 826 def _writejournal(self, desc):
827 827 self.opener.write("journal.dirstate",
828 828 self.opener.tryread("dirstate"))
829 829 self.opener.write("journal.branch",
830 830 encoding.fromlocal(self.dirstate.branch()))
831 831 self.opener.write("journal.desc",
832 832 "%d\n%s\n" % (len(self), desc))
833 833 self.opener.write("journal.bookmarks",
834 834 self.opener.tryread("bookmarks"))
835 835 self.sopener.write("journal.phaseroots",
836 836 self.sopener.tryread("phaseroots"))
837 837
838 838 def recover(self):
839 839 lock = self.lock()
840 840 try:
841 841 if os.path.exists(self.sjoin("journal")):
842 842 self.ui.status(_("rolling back interrupted transaction\n"))
843 843 transaction.rollback(self.sopener, self.sjoin("journal"),
844 844 self.ui.warn)
845 845 self.invalidate()
846 846 return True
847 847 else:
848 848 self.ui.warn(_("no interrupted transaction available\n"))
849 849 return False
850 850 finally:
851 851 lock.release()
852 852
853 853 def rollback(self, dryrun=False, force=False):
854 854 wlock = lock = None
855 855 try:
856 856 wlock = self.wlock()
857 857 lock = self.lock()
858 858 if os.path.exists(self.sjoin("undo")):
859 859 return self._rollback(dryrun, force)
860 860 else:
861 861 self.ui.warn(_("no rollback information available\n"))
862 862 return 1
863 863 finally:
864 864 release(lock, wlock)
865 865
866 866 @unfilteredmethod # Until we get smarter cache management
867 867 def _rollback(self, dryrun, force):
868 868 ui = self.ui
869 869 try:
870 870 args = self.opener.read('undo.desc').splitlines()
871 871 (oldlen, desc, detail) = (int(args[0]), args[1], None)
872 872 if len(args) >= 3:
873 873 detail = args[2]
874 874 oldtip = oldlen - 1
875 875
876 876 if detail and ui.verbose:
877 877 msg = (_('repository tip rolled back to revision %s'
878 878 ' (undo %s: %s)\n')
879 879 % (oldtip, desc, detail))
880 880 else:
881 881 msg = (_('repository tip rolled back to revision %s'
882 882 ' (undo %s)\n')
883 883 % (oldtip, desc))
884 884 except IOError:
885 885 msg = _('rolling back unknown transaction\n')
886 886 desc = None
887 887
888 888 if not force and self['.'] != self['tip'] and desc == 'commit':
889 889 raise util.Abort(
890 890 _('rollback of last commit while not checked out '
891 891 'may lose data'), hint=_('use -f to force'))
892 892
893 893 ui.status(msg)
894 894 if dryrun:
895 895 return 0
896 896
897 897 parents = self.dirstate.parents()
898 self.destroying()
898 899 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
899 900 if os.path.exists(self.join('undo.bookmarks')):
900 901 util.rename(self.join('undo.bookmarks'),
901 902 self.join('bookmarks'))
902 903 if os.path.exists(self.sjoin('undo.phaseroots')):
903 904 util.rename(self.sjoin('undo.phaseroots'),
904 905 self.sjoin('phaseroots'))
905 906 self.invalidate()
906 907
907 908 # Discard all cache entries to force reloading everything.
908 909 self._filecache.clear()
909 910
910 911 parentgone = (parents[0] not in self.changelog.nodemap or
911 912 parents[1] not in self.changelog.nodemap)
912 913 if parentgone:
913 914 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
914 915 try:
915 916 branch = self.opener.read('undo.branch')
916 917 self.dirstate.setbranch(encoding.tolocal(branch))
917 918 except IOError:
918 919 ui.warn(_('named branch could not be reset: '
919 920 'current branch is still \'%s\'\n')
920 921 % self.dirstate.branch())
921 922
922 923 self.dirstate.invalidate()
923 924 parents = tuple([p.rev() for p in self.parents()])
924 925 if len(parents) > 1:
925 926 ui.status(_('working directory now based on '
926 927 'revisions %d and %d\n') % parents)
927 928 else:
928 929 ui.status(_('working directory now based on '
929 930 'revision %d\n') % parents)
930 931 # TODO: if we know which new heads may result from this rollback, pass
931 932 # them to destroy(), which will prevent the branchhead cache from being
932 933 # invalidated.
933 934 self.destroyed()
934 935 return 0
935 936
936 937 def invalidatecaches(self):
937 938
938 939 if '_tagscache' in vars(self):
939 940 # can't use delattr on proxy
940 941 del self.__dict__['_tagscache']
941 942
942 943 self.unfiltered()._branchcaches.clear()
943 944 self.invalidatevolatilesets()
944 945
945 946 def invalidatevolatilesets(self):
946 947 self.filteredrevcache.clear()
947 948 obsolete.clearobscaches(self)
948 949
949 950 def invalidatedirstate(self):
950 951 '''Invalidates the dirstate, causing the next call to dirstate
951 952 to check if it was modified since the last time it was read,
952 953 rereading it if it has.
953 954
954 955 This is different from dirstate.invalidate() in that it doesn't always
955 956 reread the dirstate. Use dirstate.invalidate() if you want to
956 957 explicitly read the dirstate again (i.e. restoring it to a previous
957 958 known good state).'''
958 959 if hasunfilteredcache(self, 'dirstate'):
959 960 for k in self.dirstate._filecache:
960 961 try:
961 962 delattr(self.dirstate, k)
962 963 except AttributeError:
963 964 pass
964 965 delattr(self.unfiltered(), 'dirstate')
965 966
966 967 def invalidate(self):
967 968 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
968 969 for k in self._filecache:
969 970 # dirstate is invalidated separately in invalidatedirstate()
970 971 if k == 'dirstate':
971 972 continue
972 973
973 974 try:
974 975 delattr(unfiltered, k)
975 976 except AttributeError:
976 977 pass
977 978 self.invalidatecaches()
978 979
979 980 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
980 981 try:
981 982 l = lock.lock(lockname, 0, releasefn, desc=desc)
982 983 except error.LockHeld, inst:
983 984 if not wait:
984 985 raise
985 986 self.ui.warn(_("waiting for lock on %s held by %r\n") %
986 987 (desc, inst.locker))
987 988 # default to 600 seconds timeout
988 989 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
989 990 releasefn, desc=desc)
990 991 if acquirefn:
991 992 acquirefn()
992 993 return l
993 994
994 995 def _afterlock(self, callback):
995 996 """add a callback to the current repository lock.
996 997
997 998 The callback will be executed on lock release."""
998 999 l = self._lockref and self._lockref()
999 1000 if l:
1000 1001 l.postrelease.append(callback)
1001 1002 else:
1002 1003 callback()
1003 1004
1004 1005 def lock(self, wait=True):
1005 1006 '''Lock the repository store (.hg/store) and return a weak reference
1006 1007 to the lock. Use this before modifying the store (e.g. committing or
1007 1008 stripping). If you are opening a transaction, get a lock as well.'''
1008 1009 l = self._lockref and self._lockref()
1009 1010 if l is not None and l.held:
1010 1011 l.lock()
1011 1012 return l
1012 1013
1013 1014 def unlock():
1014 1015 self.store.write()
1015 1016 if hasunfilteredcache(self, '_phasecache'):
1016 1017 self._phasecache.write()
1017 1018 for k, ce in self._filecache.items():
1018 1019 if k == 'dirstate' or k not in self.__dict__:
1019 1020 continue
1020 1021 ce.refresh()
1021 1022
1022 1023 l = self._lock(self.sjoin("lock"), wait, unlock,
1023 1024 self.invalidate, _('repository %s') % self.origroot)
1024 1025 self._lockref = weakref.ref(l)
1025 1026 return l
1026 1027
1027 1028 def wlock(self, wait=True):
1028 1029 '''Lock the non-store parts of the repository (everything under
1029 1030 .hg except .hg/store) and return a weak reference to the lock.
1030 1031 Use this before modifying files in .hg.'''
1031 1032 l = self._wlockref and self._wlockref()
1032 1033 if l is not None and l.held:
1033 1034 l.lock()
1034 1035 return l
1035 1036
1036 1037 def unlock():
1037 1038 self.dirstate.write()
1038 1039 ce = self._filecache.get('dirstate')
1039 1040 if ce:
1040 1041 ce.refresh()
1041 1042
1042 1043 l = self._lock(self.join("wlock"), wait, unlock,
1043 1044 self.invalidatedirstate, _('working directory of %s') %
1044 1045 self.origroot)
1045 1046 self._wlockref = weakref.ref(l)
1046 1047 return l
1047 1048
1048 1049 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1049 1050 """
1050 1051 commit an individual file as part of a larger transaction
1051 1052 """
1052 1053
1053 1054 fname = fctx.path()
1054 1055 text = fctx.data()
1055 1056 flog = self.file(fname)
1056 1057 fparent1 = manifest1.get(fname, nullid)
1057 1058 fparent2 = fparent2o = manifest2.get(fname, nullid)
1058 1059
1059 1060 meta = {}
1060 1061 copy = fctx.renamed()
1061 1062 if copy and copy[0] != fname:
1062 1063 # Mark the new revision of this file as a copy of another
1063 1064 # file. This copy data will effectively act as a parent
1064 1065 # of this new revision. If this is a merge, the first
1065 1066 # parent will be the nullid (meaning "look up the copy data")
1066 1067 # and the second one will be the other parent. For example:
1067 1068 #
1068 1069 # 0 --- 1 --- 3 rev1 changes file foo
1069 1070 # \ / rev2 renames foo to bar and changes it
1070 1071 # \- 2 -/ rev3 should have bar with all changes and
1071 1072 # should record that bar descends from
1072 1073 # bar in rev2 and foo in rev1
1073 1074 #
1074 1075 # this allows this merge to succeed:
1075 1076 #
1076 1077 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1077 1078 # \ / merging rev3 and rev4 should use bar@rev2
1078 1079 # \- 2 --- 4 as the merge base
1079 1080 #
1080 1081
1081 1082 cfname = copy[0]
1082 1083 crev = manifest1.get(cfname)
1083 1084 newfparent = fparent2
1084 1085
1085 1086 if manifest2: # branch merge
1086 1087 if fparent2 == nullid or crev is None: # copied on remote side
1087 1088 if cfname in manifest2:
1088 1089 crev = manifest2[cfname]
1089 1090 newfparent = fparent1
1090 1091
1091 1092 # find source in nearest ancestor if we've lost track
1092 1093 if not crev:
1093 1094 self.ui.debug(" %s: searching for copy revision for %s\n" %
1094 1095 (fname, cfname))
1095 1096 for ancestor in self[None].ancestors():
1096 1097 if cfname in ancestor:
1097 1098 crev = ancestor[cfname].filenode()
1098 1099 break
1099 1100
1100 1101 if crev:
1101 1102 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1102 1103 meta["copy"] = cfname
1103 1104 meta["copyrev"] = hex(crev)
1104 1105 fparent1, fparent2 = nullid, newfparent
1105 1106 else:
1106 1107 self.ui.warn(_("warning: can't find ancestor for '%s' "
1107 1108 "copied from '%s'!\n") % (fname, cfname))
1108 1109
1109 1110 elif fparent2 != nullid:
1110 1111 # is one parent an ancestor of the other?
1111 1112 fparentancestor = flog.ancestor(fparent1, fparent2)
1112 1113 if fparentancestor == fparent1:
1113 1114 fparent1, fparent2 = fparent2, nullid
1114 1115 elif fparentancestor == fparent2:
1115 1116 fparent2 = nullid
1116 1117
1117 1118 # is the file changed?
1118 1119 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1119 1120 changelist.append(fname)
1120 1121 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1121 1122
1122 1123 # are just the flags changed during merge?
1123 1124 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1124 1125 changelist.append(fname)
1125 1126
1126 1127 return fparent1
1127 1128
1128 1129 @unfilteredmethod
1129 1130 def commit(self, text="", user=None, date=None, match=None, force=False,
1130 1131 editor=False, extra={}):
1131 1132 """Add a new revision to current repository.
1132 1133
1133 1134 Revision information is gathered from the working directory,
1134 1135 match can be used to filter the committed files. If editor is
1135 1136 supplied, it is called to get a commit message.
1136 1137 """
1137 1138
1138 1139 def fail(f, msg):
1139 1140 raise util.Abort('%s: %s' % (f, msg))
1140 1141
1141 1142 if not match:
1142 1143 match = matchmod.always(self.root, '')
1143 1144
1144 1145 if not force:
1145 1146 vdirs = []
1146 1147 match.dir = vdirs.append
1147 1148 match.bad = fail
1148 1149
1149 1150 wlock = self.wlock()
1150 1151 try:
1151 1152 wctx = self[None]
1152 1153 merge = len(wctx.parents()) > 1
1153 1154
1154 1155 if (not force and merge and match and
1155 1156 (match.files() or match.anypats())):
1156 1157 raise util.Abort(_('cannot partially commit a merge '
1157 1158 '(do not specify files or patterns)'))
1158 1159
1159 1160 changes = self.status(match=match, clean=force)
1160 1161 if force:
1161 1162 changes[0].extend(changes[6]) # mq may commit unchanged files
1162 1163
1163 1164 # check subrepos
1164 1165 subs = []
1165 1166 commitsubs = set()
1166 1167 newstate = wctx.substate.copy()
1167 1168 # only manage subrepos and .hgsubstate if .hgsub is present
1168 1169 if '.hgsub' in wctx:
1169 1170 # we'll decide whether to track this ourselves, thanks
1170 1171 if '.hgsubstate' in changes[0]:
1171 1172 changes[0].remove('.hgsubstate')
1172 1173 if '.hgsubstate' in changes[2]:
1173 1174 changes[2].remove('.hgsubstate')
1174 1175
1175 1176 # compare current state to last committed state
1176 1177 # build new substate based on last committed state
1177 1178 oldstate = wctx.p1().substate
1178 1179 for s in sorted(newstate.keys()):
1179 1180 if not match(s):
1180 1181 # ignore working copy, use old state if present
1181 1182 if s in oldstate:
1182 1183 newstate[s] = oldstate[s]
1183 1184 continue
1184 1185 if not force:
1185 1186 raise util.Abort(
1186 1187 _("commit with new subrepo %s excluded") % s)
1187 1188 if wctx.sub(s).dirty(True):
1188 1189 if not self.ui.configbool('ui', 'commitsubrepos'):
1189 1190 raise util.Abort(
1190 1191 _("uncommitted changes in subrepo %s") % s,
1191 1192 hint=_("use --subrepos for recursive commit"))
1192 1193 subs.append(s)
1193 1194 commitsubs.add(s)
1194 1195 else:
1195 1196 bs = wctx.sub(s).basestate()
1196 1197 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1197 1198 if oldstate.get(s, (None, None, None))[1] != bs:
1198 1199 subs.append(s)
1199 1200
1200 1201 # check for removed subrepos
1201 1202 for p in wctx.parents():
1202 1203 r = [s for s in p.substate if s not in newstate]
1203 1204 subs += [s for s in r if match(s)]
1204 1205 if subs:
1205 1206 if (not match('.hgsub') and
1206 1207 '.hgsub' in (wctx.modified() + wctx.added())):
1207 1208 raise util.Abort(
1208 1209 _("can't commit subrepos without .hgsub"))
1209 1210 changes[0].insert(0, '.hgsubstate')
1210 1211
1211 1212 elif '.hgsub' in changes[2]:
1212 1213 # clean up .hgsubstate when .hgsub is removed
1213 1214 if ('.hgsubstate' in wctx and
1214 1215 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1215 1216 changes[2].insert(0, '.hgsubstate')
1216 1217
1217 1218 # make sure all explicit patterns are matched
1218 1219 if not force and match.files():
1219 1220 matched = set(changes[0] + changes[1] + changes[2])
1220 1221
1221 1222 for f in match.files():
1222 1223 f = self.dirstate.normalize(f)
1223 1224 if f == '.' or f in matched or f in wctx.substate:
1224 1225 continue
1225 1226 if f in changes[3]: # missing
1226 1227 fail(f, _('file not found!'))
1227 1228 if f in vdirs: # visited directory
1228 1229 d = f + '/'
1229 1230 for mf in matched:
1230 1231 if mf.startswith(d):
1231 1232 break
1232 1233 else:
1233 1234 fail(f, _("no match under directory!"))
1234 1235 elif f not in self.dirstate:
1235 1236 fail(f, _("file not tracked!"))
1236 1237
1237 1238 if (not force and not extra.get("close") and not merge
1238 1239 and not (changes[0] or changes[1] or changes[2])
1239 1240 and wctx.branch() == wctx.p1().branch()):
1240 1241 return None
1241 1242
1242 1243 if merge and changes[3]:
1243 1244 raise util.Abort(_("cannot commit merge with missing files"))
1244 1245
1245 1246 ms = mergemod.mergestate(self)
1246 1247 for f in changes[0]:
1247 1248 if f in ms and ms[f] == 'u':
1248 1249 raise util.Abort(_("unresolved merge conflicts "
1249 1250 "(see hg help resolve)"))
1250 1251
1251 1252 cctx = context.workingctx(self, text, user, date, extra, changes)
1252 1253 if editor:
1253 1254 cctx._text = editor(self, cctx, subs)
1254 1255 edited = (text != cctx._text)
1255 1256
1256 1257 # commit subs and write new state
1257 1258 if subs:
1258 1259 for s in sorted(commitsubs):
1259 1260 sub = wctx.sub(s)
1260 1261 self.ui.status(_('committing subrepository %s\n') %
1261 1262 subrepo.subrelpath(sub))
1262 1263 sr = sub.commit(cctx._text, user, date)
1263 1264 newstate[s] = (newstate[s][0], sr)
1264 1265 subrepo.writestate(self, newstate)
1265 1266
1266 1267 # Save commit message in case this transaction gets rolled back
1267 1268 # (e.g. by a pretxncommit hook). Leave the content alone on
1268 1269 # the assumption that the user will use the same editor again.
1269 1270 msgfn = self.savecommitmessage(cctx._text)
1270 1271
1271 1272 p1, p2 = self.dirstate.parents()
1272 1273 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1273 1274 try:
1274 1275 self.hook("precommit", throw=True, parent1=hookp1,
1275 1276 parent2=hookp2)
1276 1277 ret = self.commitctx(cctx, True)
1277 1278 except: # re-raises
1278 1279 if edited:
1279 1280 self.ui.write(
1280 1281 _('note: commit message saved in %s\n') % msgfn)
1281 1282 raise
1282 1283
1283 1284 # update bookmarks, dirstate and mergestate
1284 1285 bookmarks.update(self, [p1, p2], ret)
1285 1286 for f in changes[0] + changes[1]:
1286 1287 self.dirstate.normal(f)
1287 1288 for f in changes[2]:
1288 1289 self.dirstate.drop(f)
1289 1290 self.dirstate.setparents(ret)
1290 1291 ms.reset()
1291 1292 finally:
1292 1293 wlock.release()
1293 1294
1294 1295 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1295 1296 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1296 1297 self._afterlock(commithook)
1297 1298 return ret
1298 1299
1299 1300 @unfilteredmethod
1300 1301 def commitctx(self, ctx, error=False):
1301 1302 """Add a new revision to current repository.
1302 1303 Revision information is passed via the context argument.
1303 1304 """
1304 1305
1305 1306 tr = lock = None
1306 1307 removed = list(ctx.removed())
1307 1308 p1, p2 = ctx.p1(), ctx.p2()
1308 1309 user = ctx.user()
1309 1310
1310 1311 lock = self.lock()
1311 1312 try:
1312 1313 tr = self.transaction("commit")
1313 1314 trp = weakref.proxy(tr)
1314 1315
1315 1316 if ctx.files():
1316 1317 m1 = p1.manifest().copy()
1317 1318 m2 = p2.manifest()
1318 1319
1319 1320 # check in files
1320 1321 new = {}
1321 1322 changed = []
1322 1323 linkrev = len(self)
1323 1324 for f in sorted(ctx.modified() + ctx.added()):
1324 1325 self.ui.note(f + "\n")
1325 1326 try:
1326 1327 fctx = ctx[f]
1327 1328 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1328 1329 changed)
1329 1330 m1.set(f, fctx.flags())
1330 1331 except OSError, inst:
1331 1332 self.ui.warn(_("trouble committing %s!\n") % f)
1332 1333 raise
1333 1334 except IOError, inst:
1334 1335 errcode = getattr(inst, 'errno', errno.ENOENT)
1335 1336 if error or errcode and errcode != errno.ENOENT:
1336 1337 self.ui.warn(_("trouble committing %s!\n") % f)
1337 1338 raise
1338 1339 else:
1339 1340 removed.append(f)
1340 1341
1341 1342 # update manifest
1342 1343 m1.update(new)
1343 1344 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1344 1345 drop = [f for f in removed if f in m1]
1345 1346 for f in drop:
1346 1347 del m1[f]
1347 1348 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1348 1349 p2.manifestnode(), (new, drop))
1349 1350 files = changed + removed
1350 1351 else:
1351 1352 mn = p1.manifestnode()
1352 1353 files = []
1353 1354
1354 1355 # update changelog
1355 1356 self.changelog.delayupdate()
1356 1357 n = self.changelog.add(mn, files, ctx.description(),
1357 1358 trp, p1.node(), p2.node(),
1358 1359 user, ctx.date(), ctx.extra().copy())
1359 1360 p = lambda: self.changelog.writepending() and self.root or ""
1360 1361 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1361 1362 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1362 1363 parent2=xp2, pending=p)
1363 1364 self.changelog.finalize(trp)
1364 1365 # set the new commit in its proper phase
1365 1366 targetphase = phases.newcommitphase(self.ui)
1366 1367 if targetphase:
1367 1368 # retract boundary does not alter parent changesets.
1368 1369 # if a parent has a higher phase, the resulting phase will
1369 1370 # be compliant anyway
1370 1371 #
1371 1372 # if minimal phase was 0 we don't need to retract anything
1372 1373 phases.retractboundary(self, targetphase, [n])
1373 1374 tr.close()
1374 1375 branchmap.updatecache(self)
1375 1376 return n
1376 1377 finally:
1377 1378 if tr:
1378 1379 tr.release()
1379 1380 lock.release()
1380 1381
1381 1382 @unfilteredmethod
1383 def destroying(self):
1384 '''Inform the repository that nodes are about to be destroyed.
1385 Intended for use by strip and rollback, so there's a common
1386 place for anything that has to be done before destroying history.
1387
1388 This is mostly useful for saving state that is in memory and waiting
1389 to be flushed when the current lock is released. Because a call to
1390 destroyed is imminent, the repo will be invalidated, causing those
1391 changes to either stay in memory (waiting for the next unlock) or vanish
1392 completely.
1393 '''
1394 pass
1395
1396 @unfilteredmethod
1382 1397 def destroyed(self, newheadnodes=None):
1383 1398 '''Inform the repository that nodes have been destroyed.
1384 1399 Intended for use by strip and rollback, so there's a common
1385 1400 place for anything that has to be done after destroying history.
1386 1401
1387 1402 If you know the branchheadcache was uptodate before nodes were removed
1388 1403 and you also know the set of candidate new heads that may have resulted
1389 1404 from the destruction, you can set newheadnodes. This will enable the
1390 1405 code to update the branchheads cache, rather than having future code
1391 1406 decide it's invalid and regenerating it from scratch.
1392 1407 '''
1393 1408 # When one tries to:
1394 1409 # 1) destroy nodes thus calling this method (e.g. strip)
1395 1410 # 2) use phasecache somewhere (e.g. commit)
1396 1411 #
1397 1412 # then 2) will fail because the phasecache contains nodes that were
1398 1413 # removed. We can either remove phasecache from the filecache,
1399 1414 # causing it to reload next time it is accessed, or simply filter
1400 1415 # the removed nodes now and write the updated cache.
1401 1416 if '_phasecache' in self._filecache:
1402 1417 self._phasecache.filterunknown(self)
1403 1418 self._phasecache.write()
1404 1419
1405 1420 # If we have info, newheadnodes, on how to update the branch cache, do
1406 1421 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1407 1422 # will be caught the next time it is read.
1408 1423 if newheadnodes:
1409 1424 cl = self.changelog
1410 1425 revgen = (cl.rev(node) for node in newheadnodes
1411 1426 if cl.hasnode(node))
1412 1427 cache = self._branchcaches[None]
1413 1428 cache.update(self, revgen)
1414 1429 cache.write(self)
1415 1430
1416 1431 # Ensure the persistent tag cache is updated. Doing it now
1417 1432 # means that the tag cache only has to worry about destroyed
1418 1433 # heads immediately after a strip/rollback. That in turn
1419 1434 # guarantees that "cachetip == currenttip" (comparing both rev
1420 1435 # and node) always means no nodes have been added or destroyed.
1421 1436
1422 1437 # XXX this is suboptimal when qrefresh'ing: we strip the current
1423 1438 # head, refresh the tag cache, then immediately add a new head.
1424 1439 # But I think doing it this way is necessary for the "instant
1425 1440 # tag cache retrieval" case to work.
1426 1441 self.invalidatecaches()
1427 1442
1428 1443 # Discard all cache entries to force reloading everything.
1429 1444 self._filecache.clear()
1430 1445
1431 1446 def walk(self, match, node=None):
1432 1447 '''
1433 1448 walk recursively through the directory tree or a given
1434 1449 changeset, finding all files matched by the match
1435 1450 function
1436 1451 '''
1437 1452 return self[node].walk(match)
1438 1453
1439 1454 def status(self, node1='.', node2=None, match=None,
1440 1455 ignored=False, clean=False, unknown=False,
1441 1456 listsubrepos=False):
1442 1457 """return status of files between two nodes or node and working
1443 1458 directory.
1444 1459
1445 1460 If node1 is None, use the first dirstate parent instead.
1446 1461 If node2 is None, compare node1 with working directory.
1447 1462 """
1448 1463
1449 1464 def mfmatches(ctx):
1450 1465 mf = ctx.manifest().copy()
1451 1466 if match.always():
1452 1467 return mf
1453 1468 for fn in mf.keys():
1454 1469 if not match(fn):
1455 1470 del mf[fn]
1456 1471 return mf
1457 1472
1458 1473 if isinstance(node1, context.changectx):
1459 1474 ctx1 = node1
1460 1475 else:
1461 1476 ctx1 = self[node1]
1462 1477 if isinstance(node2, context.changectx):
1463 1478 ctx2 = node2
1464 1479 else:
1465 1480 ctx2 = self[node2]
1466 1481
1467 1482 working = ctx2.rev() is None
1468 1483 parentworking = working and ctx1 == self['.']
1469 1484 match = match or matchmod.always(self.root, self.getcwd())
1470 1485 listignored, listclean, listunknown = ignored, clean, unknown
1471 1486
1472 1487 # load earliest manifest first for caching reasons
1473 1488 if not working and ctx2.rev() < ctx1.rev():
1474 1489 ctx2.manifest()
1475 1490
1476 1491 if not parentworking:
1477 1492 def bad(f, msg):
1478 1493 # 'f' may be a directory pattern from 'match.files()',
1479 1494 # so 'f not in ctx1' is not enough
1480 1495 if f not in ctx1 and f not in ctx1.dirs():
1481 1496 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1482 1497 match.bad = bad
1483 1498
1484 1499 if working: # we need to scan the working dir
1485 1500 subrepos = []
1486 1501 if '.hgsub' in self.dirstate:
1487 1502 subrepos = ctx2.substate.keys()
1488 1503 s = self.dirstate.status(match, subrepos, listignored,
1489 1504 listclean, listunknown)
1490 1505 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1491 1506
1492 1507 # check for any possibly clean files
1493 1508 if parentworking and cmp:
1494 1509 fixup = []
1495 1510 # do a full compare of any files that might have changed
1496 1511 for f in sorted(cmp):
1497 1512 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1498 1513 or ctx1[f].cmp(ctx2[f])):
1499 1514 modified.append(f)
1500 1515 else:
1501 1516 fixup.append(f)
1502 1517
1503 1518 # update dirstate for files that are actually clean
1504 1519 if fixup:
1505 1520 if listclean:
1506 1521 clean += fixup
1507 1522
1508 1523 try:
1509 1524 # updating the dirstate is optional
1510 1525 # so we don't wait on the lock
1511 1526 wlock = self.wlock(False)
1512 1527 try:
1513 1528 for f in fixup:
1514 1529 self.dirstate.normal(f)
1515 1530 finally:
1516 1531 wlock.release()
1517 1532 except error.LockError:
1518 1533 pass
1519 1534
1520 1535 if not parentworking:
1521 1536 mf1 = mfmatches(ctx1)
1522 1537 if working:
1523 1538 # we are comparing working dir against non-parent
1524 1539 # generate a pseudo-manifest for the working dir
1525 1540 mf2 = mfmatches(self['.'])
1526 1541 for f in cmp + modified + added:
1527 1542 mf2[f] = None
1528 1543 mf2.set(f, ctx2.flags(f))
1529 1544 for f in removed:
1530 1545 if f in mf2:
1531 1546 del mf2[f]
1532 1547 else:
1533 1548 # we are comparing two revisions
1534 1549 deleted, unknown, ignored = [], [], []
1535 1550 mf2 = mfmatches(ctx2)
1536 1551
1537 1552 modified, added, clean = [], [], []
1538 1553 withflags = mf1.withflags() | mf2.withflags()
1539 1554 for fn in mf2:
1540 1555 if fn in mf1:
1541 1556 if (fn not in deleted and
1542 1557 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1543 1558 (mf1[fn] != mf2[fn] and
1544 1559 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1545 1560 modified.append(fn)
1546 1561 elif listclean:
1547 1562 clean.append(fn)
1548 1563 del mf1[fn]
1549 1564 elif fn not in deleted:
1550 1565 added.append(fn)
1551 1566 removed = mf1.keys()
1552 1567
1553 1568 if working and modified and not self.dirstate._checklink:
1554 1569 # Symlink placeholders may get non-symlink-like contents
1555 1570 # via user error or dereferencing by NFS or Samba servers,
1556 1571 # so we filter out any placeholders that don't look like a
1557 1572 # symlink
1558 1573 sane = []
1559 1574 for f in modified:
1560 1575 if ctx2.flags(f) == 'l':
1561 1576 d = ctx2[f].data()
1562 1577 if len(d) >= 1024 or '\n' in d or util.binary(d):
1563 1578 self.ui.debug('ignoring suspect symlink placeholder'
1564 1579 ' "%s"\n' % f)
1565 1580 continue
1566 1581 sane.append(f)
1567 1582 modified = sane
1568 1583
1569 1584 r = modified, added, removed, deleted, unknown, ignored, clean
1570 1585
1571 1586 if listsubrepos:
1572 1587 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1573 1588 if working:
1574 1589 rev2 = None
1575 1590 else:
1576 1591 rev2 = ctx2.substate[subpath][1]
1577 1592 try:
1578 1593 submatch = matchmod.narrowmatcher(subpath, match)
1579 1594 s = sub.status(rev2, match=submatch, ignored=listignored,
1580 1595 clean=listclean, unknown=listunknown,
1581 1596 listsubrepos=True)
1582 1597 for rfiles, sfiles in zip(r, s):
1583 1598 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1584 1599 except error.LookupError:
1585 1600 self.ui.status(_("skipping missing subrepository: %s\n")
1586 1601 % subpath)
1587 1602
1588 1603 for l in r:
1589 1604 l.sort()
1590 1605 return r
1591 1606
1592 1607 def heads(self, start=None):
1593 1608 heads = self.changelog.heads(start)
1594 1609 # sort the output in rev descending order
1595 1610 return sorted(heads, key=self.changelog.rev, reverse=True)
1596 1611
1597 1612 def branchheads(self, branch=None, start=None, closed=False):
1598 1613 '''return a (possibly filtered) list of heads for the given branch
1599 1614
1600 1615 Heads are returned in topological order, from newest to oldest.
1601 1616 If branch is None, use the dirstate branch.
1602 1617 If start is not None, return only heads reachable from start.
1603 1618 If closed is True, return heads that are marked as closed as well.
1604 1619 '''
1605 1620 if branch is None:
1606 1621 branch = self[None].branch()
1607 1622 branches = self.branchmap()
1608 1623 if branch not in branches:
1609 1624 return []
1610 1625 # the cache returns heads ordered lowest to highest
1611 1626 bheads = list(reversed(branches[branch]))
1612 1627 if start is not None:
1613 1628 # filter out the heads that cannot be reached from startrev
1614 1629 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1615 1630 bheads = [h for h in bheads if h in fbheads]
1616 1631 if not closed:
1617 1632 bheads = [h for h in bheads if not self[h].closesbranch()]
1618 1633 return bheads
1619 1634
1620 1635 def branches(self, nodes):
1621 1636 if not nodes:
1622 1637 nodes = [self.changelog.tip()]
1623 1638 b = []
1624 1639 for n in nodes:
1625 1640 t = n
1626 1641 while True:
1627 1642 p = self.changelog.parents(n)
1628 1643 if p[1] != nullid or p[0] == nullid:
1629 1644 b.append((t, n, p[0], p[1]))
1630 1645 break
1631 1646 n = p[0]
1632 1647 return b
1633 1648
1634 1649 def between(self, pairs):
1635 1650 r = []
1636 1651
1637 1652 for top, bottom in pairs:
1638 1653 n, l, i = top, [], 0
1639 1654 f = 1
1640 1655
1641 1656 while n != bottom and n != nullid:
1642 1657 p = self.changelog.parents(n)[0]
1643 1658 if i == f:
1644 1659 l.append(n)
1645 1660 f = f * 2
1646 1661 n = p
1647 1662 i += 1
1648 1663
1649 1664 r.append(l)
1650 1665
1651 1666 return r
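# Illustrative note, not part of the source: between() walks first parents
# from each 'top' towards 'bottom' and records the nodes it passes at
# distances 1, 2, 4, 8, ... from 'top'.  These exponentially spaced samples
# are what the legacy (pre-getbundle) discovery exchange uses, keeping the
# number of round trips logarithmic in the length of the history between
# the two nodes.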
1652 1667
1653 1668 def pull(self, remote, heads=None, force=False):
1654 1669         # don't open a transaction for nothing or you break future useful
1655 1670         # rollback calls
1656 1671 tr = None
1657 1672 trname = 'pull\n' + util.hidepassword(remote.url())
1658 1673 lock = self.lock()
1659 1674 try:
1660 1675 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1661 1676 force=force)
1662 1677 common, fetch, rheads = tmp
1663 1678 if not fetch:
1664 1679 self.ui.status(_("no changes found\n"))
1665 1680 added = []
1666 1681 result = 0
1667 1682 else:
1668 1683 tr = self.transaction(trname)
1669 1684 if heads is None and list(common) == [nullid]:
1670 1685 self.ui.status(_("requesting all changes\n"))
1671 1686 elif heads is None and remote.capable('changegroupsubset'):
1672 1687 # issue1320, avoid a race if remote changed after discovery
1673 1688 heads = rheads
1674 1689
1675 1690 if remote.capable('getbundle'):
1676 1691 cg = remote.getbundle('pull', common=common,
1677 1692 heads=heads or rheads)
1678 1693 elif heads is None:
1679 1694 cg = remote.changegroup(fetch, 'pull')
1680 1695 elif not remote.capable('changegroupsubset'):
1681 1696 raise util.Abort(_("partial pull cannot be done because "
1682 1697 "other repository doesn't support "
1683 1698 "changegroupsubset."))
1684 1699 else:
1685 1700 cg = remote.changegroupsubset(fetch, heads, 'pull')
1686 1701 clstart = len(self.changelog)
1687 1702 result = self.addchangegroup(cg, 'pull', remote.url())
1688 1703 clend = len(self.changelog)
1689 1704 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1690 1705
1691 1706 # compute target subset
1692 1707 if heads is None:
1693 1708                 # We pulled everything possible
1694 1709 # sync on everything common
1695 1710 subset = common + added
1696 1711 else:
1697 1712 # We pulled a specific subset
1698 1713 # sync on this subset
1699 1714 subset = heads
1700 1715
1701 1716 # Get remote phases data from remote
1702 1717 remotephases = remote.listkeys('phases')
1703 1718 publishing = bool(remotephases.get('publishing', False))
1704 1719 if remotephases and not publishing:
1705 1720 # remote is new and unpublishing
1706 1721 pheads, _dr = phases.analyzeremotephases(self, subset,
1707 1722 remotephases)
1708 1723 phases.advanceboundary(self, phases.public, pheads)
1709 1724 phases.advanceboundary(self, phases.draft, subset)
1710 1725 else:
1711 1726                 # Remote is old or publishing; all common changesets
1712 1727                 # should be seen as public
1713 1728 phases.advanceboundary(self, phases.public, subset)
1714 1729
1715 1730 if obsolete._enabled:
1716 1731 self.ui.debug('fetching remote obsolete markers\n')
1717 1732 remoteobs = remote.listkeys('obsolete')
1718 1733 if 'dump0' in remoteobs:
1719 1734 if tr is None:
1720 1735 tr = self.transaction(trname)
1721 1736 for key in sorted(remoteobs, reverse=True):
1722 1737 if key.startswith('dump'):
1723 1738 data = base85.b85decode(remoteobs[key])
1724 1739 self.obsstore.mergemarkers(tr, data)
1725 1740 self.invalidatevolatilesets()
1726 1741 if tr is not None:
1727 1742 tr.close()
1728 1743 finally:
1729 1744 if tr is not None:
1730 1745 tr.release()
1731 1746 lock.release()
1732 1747
1733 1748 return result
1734 1749
1735 1750 def checkpush(self, force, revs):
1736 1751 """Extensions can override this function if additional checks have
1737 1752 to be performed before pushing, or call it if they override push
1738 1753 command.
1739 1754 """
1740 1755 pass
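# Illustrative sketch, not part of the source: per the docstring above, a
# third-party extension could wrap checkpush() to veto pushes.  The extension
# body, the config knob 'push.maxrevs' and the helper name are hypothetical
# and only serve this example.
#
#     from mercurial import extensions, localrepo, util
#
#     def _checkpush(orig, repo, force, revs):
#         limit = repo.ui.configint('push', 'maxrevs', 0)
#         if limit and revs and len(revs) > limit and not force:
#             raise util.Abort('refusing to push more than %d revs' % limit)
#         return orig(repo, force, revs)
#
#     def uisetup(ui):
#         extensions.wrapfunction(localrepo.localrepository, 'checkpush',
#                                 _checkpush)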
1741 1756
1742 1757 def push(self, remote, force=False, revs=None, newbranch=False):
1743 1758 '''Push outgoing changesets (limited by revs) from the current
1744 1759 repository to remote. Return an integer:
1745 1760 - None means nothing to push
1746 1761 - 0 means HTTP error
1747 1762 - 1 means we pushed and remote head count is unchanged *or*
1748 1763 we have outgoing changesets but refused to push
1749 1764 - other values as described by addchangegroup()
1750 1765 '''
1751 1766 # there are two ways to push to remote repo:
1752 1767 #
1753 1768 # addchangegroup assumes local user can lock remote
1754 1769 # repo (local filesystem, old ssh servers).
1755 1770 #
1756 1771 # unbundle assumes local user cannot lock remote repo (new ssh
1757 1772 # servers, http servers).
1758 1773
1759 1774 if not remote.canpush():
1760 1775 raise util.Abort(_("destination does not support push"))
1761 1776 unfi = self.unfiltered()
1762 1777 # get local lock as we might write phase data
1763 1778 locallock = self.lock()
1764 1779 try:
1765 1780 self.checkpush(force, revs)
1766 1781 lock = None
1767 1782 unbundle = remote.capable('unbundle')
1768 1783 if not unbundle:
1769 1784 lock = remote.lock()
1770 1785 try:
1771 1786 # discovery
1772 1787 fci = discovery.findcommonincoming
1773 1788 commoninc = fci(unfi, remote, force=force)
1774 1789 common, inc, remoteheads = commoninc
1775 1790 fco = discovery.findcommonoutgoing
1776 1791 outgoing = fco(unfi, remote, onlyheads=revs,
1777 1792 commoninc=commoninc, force=force)
1778 1793
1779 1794
1780 1795 if not outgoing.missing:
1781 1796 # nothing to push
1782 1797 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1783 1798 ret = None
1784 1799 else:
1785 1800 # something to push
1786 1801 if not force:
1787 1802                     # if self.obsstore is empty --> no obsolete markers,
1788 1803                     # so we can skip the iteration below
1789 1804 if unfi.obsstore:
1790 1805                     # these messages are defined here to stay under the 80-char limit
1791 1806 mso = _("push includes obsolete changeset: %s!")
1792 1807 mst = "push includes %s changeset: %s!"
1793 1808 # plain versions for i18n tool to detect them
1794 1809 _("push includes unstable changeset: %s!")
1795 1810 _("push includes bumped changeset: %s!")
1796 1811 _("push includes divergent changeset: %s!")
1797 1812                     # If we are about to push and there is at least one
1798 1813                     # obsolete or unstable changeset in missing, then at
1799 1814                     # least one of the missing heads will be obsolete or
1800 1815                     # unstable. So checking heads only is enough.
1801 1816 for node in outgoing.missingheads:
1802 1817 ctx = unfi[node]
1803 1818 if ctx.obsolete():
1804 1819 raise util.Abort(mso % ctx)
1805 1820 elif ctx.troubled():
1806 1821 raise util.Abort(_(mst)
1807 1822 % (ctx.troubles()[0],
1808 1823 ctx))
1809 1824 discovery.checkheads(unfi, remote, outgoing,
1810 1825 remoteheads, newbranch,
1811 1826 bool(inc))
1812 1827
1813 1828 # create a changegroup from local
1814 1829 if revs is None and not outgoing.excluded:
1815 1830 # push everything,
1816 1831 # use the fast path, no race possible on push
1817 1832 cg = self._changegroup(outgoing.missing, 'push')
1818 1833 else:
1819 1834 cg = self.getlocalbundle('push', outgoing)
1820 1835
1821 1836 # apply changegroup to remote
1822 1837 if unbundle:
1823 1838 # local repo finds heads on server, finds out what
1824 1839 # revs it must push. once revs transferred, if server
1825 1840 # finds it has different heads (someone else won
1826 1841 # commit/push race), server aborts.
1827 1842 if force:
1828 1843 remoteheads = ['force']
1829 1844 # ssh: return remote's addchangegroup()
1830 1845 # http: return remote's addchangegroup() or 0 for error
1831 1846 ret = remote.unbundle(cg, remoteheads, 'push')
1832 1847 else:
1833 1848 # we return an integer indicating remote head count
1834 1849 # change
1835 1850 ret = remote.addchangegroup(cg, 'push', self.url())
1836 1851
1837 1852 if ret:
1838 1853                 # push succeeded, synchronize target of the push
1839 1854 cheads = outgoing.missingheads
1840 1855 elif revs is None:
1841 1856                 # All-out push failed; synchronize on all common heads
1842 1857 cheads = outgoing.commonheads
1843 1858 else:
1844 1859 # I want cheads = heads(::missingheads and ::commonheads)
1845 1860 # (missingheads is revs with secret changeset filtered out)
1846 1861 #
1847 1862 # This can be expressed as:
1848 1863 # cheads = ( (missingheads and ::commonheads)
1849 1864                 #              + (commonheads and ::missingheads)
1850 1865 # )
1851 1866 #
1852 1867 # while trying to push we already computed the following:
1853 1868 # common = (::commonheads)
1854 1869 # missing = ((commonheads::missingheads) - commonheads)
1855 1870 #
1856 1871 # We can pick:
1857 1872 # * missingheads part of common (::commonheads)
1858 1873 common = set(outgoing.common)
1859 1874 cheads = [node for node in revs if node in common]
1860 1875 # and
1861 1876 # * commonheads parents on missing
1862 1877 revset = unfi.set('%ln and parents(roots(%ln))',
1863 1878 outgoing.commonheads,
1864 1879 outgoing.missing)
1865 1880 cheads.extend(c.node() for c in revset)
1866 1881 # even when we don't push, exchanging phase data is useful
1867 1882 remotephases = remote.listkeys('phases')
1868 1883 if not remotephases: # old server or public only repo
1869 1884 phases.advanceboundary(self, phases.public, cheads)
1870 1885 # don't push any phase data as there is nothing to push
1871 1886 else:
1872 1887 ana = phases.analyzeremotephases(self, cheads, remotephases)
1873 1888 pheads, droots = ana
1874 1889 ### Apply remote phase on local
1875 1890 if remotephases.get('publishing', False):
1876 1891 phases.advanceboundary(self, phases.public, cheads)
1877 1892 else: # publish = False
1878 1893 phases.advanceboundary(self, phases.public, pheads)
1879 1894 phases.advanceboundary(self, phases.draft, cheads)
1880 1895 ### Apply local phase on remote
1881 1896
1882 1897                 # Get the list of all revs that are draft on remote but public here.
1883 1898                 # XXX Beware that the revset breaks if droots is not a strict set of
1884 1899                 # XXX roots; we may want to ensure it is, but that would be costly
1885 1900 outdated = unfi.set('heads((%ln::%ln) and public())',
1886 1901 droots, cheads)
1887 1902 for newremotehead in outdated:
1888 1903 r = remote.pushkey('phases',
1889 1904 newremotehead.hex(),
1890 1905 str(phases.draft),
1891 1906 str(phases.public))
1892 1907 if not r:
1893 1908 self.ui.warn(_('updating %s to public failed!\n')
1894 1909 % newremotehead)
1895 1910 self.ui.debug('try to push obsolete markers to remote\n')
1896 1911 if (obsolete._enabled and self.obsstore and
1897 1912 'obsolete' in remote.listkeys('namespaces')):
1898 1913 rslts = []
1899 1914 remotedata = self.listkeys('obsolete')
1900 1915 for key in sorted(remotedata, reverse=True):
1901 1916 # reverse sort to ensure we end with dump0
1902 1917 data = remotedata[key]
1903 1918 rslts.append(remote.pushkey('obsolete', key, '', data))
1904 1919 if [r for r in rslts if not r]:
1905 1920 msg = _('failed to push some obsolete markers!\n')
1906 1921 self.ui.warn(msg)
1907 1922 finally:
1908 1923 if lock is not None:
1909 1924 lock.release()
1910 1925 finally:
1911 1926 locallock.release()
1912 1927
1913 1928 self.ui.debug("checking for updated bookmarks\n")
1914 1929 rb = remote.listkeys('bookmarks')
1915 1930 for k in rb.keys():
1916 1931 if k in unfi._bookmarks:
1917 1932 nr, nl = rb[k], hex(self._bookmarks[k])
1918 1933 if nr in unfi:
1919 1934 cr = unfi[nr]
1920 1935 cl = unfi[nl]
1921 1936 if bookmarks.validdest(unfi, cr, cl):
1922 1937 r = remote.pushkey('bookmarks', k, nr, nl)
1923 1938 if r:
1924 1939 self.ui.status(_("updating bookmark %s\n") % k)
1925 1940 else:
1926 1941 self.ui.warn(_('updating bookmark %s'
1927 1942 ' failed!\n') % k)
1928 1943
1929 1944 return ret
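# Illustrative sketch, not part of the source: interpreting the integer
# documented in the push() docstring above.  'dest' stands for a peer
# obtained elsewhere (e.g. via hg.peer()) and is an assumption of the
# example.
#
#     ret = repo.push(dest)
#     if ret is None:
#         repo.ui.status('nothing to push\n')
#     elif ret == 0:
#         repo.ui.warn('push failed (HTTP error)\n')
#     elif ret == 1:
#         repo.ui.status('pushed, remote head count unchanged '
#                        '(or push was refused)\n')
#     else:
#         # other values come from addchangegroup(): 1 + added heads,
#         # or -1 - removed heads
#         repo.ui.status('pushed, remote heads changed by %+d\n' %
#                        (ret - 1 if ret > 1 else ret + 1))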
1930 1945
1931 1946 def changegroupinfo(self, nodes, source):
1932 1947 if self.ui.verbose or source == 'bundle':
1933 1948 self.ui.status(_("%d changesets found\n") % len(nodes))
1934 1949 if self.ui.debugflag:
1935 1950 self.ui.debug("list of changesets:\n")
1936 1951 for node in nodes:
1937 1952 self.ui.debug("%s\n" % hex(node))
1938 1953
1939 1954 def changegroupsubset(self, bases, heads, source):
1940 1955 """Compute a changegroup consisting of all the nodes that are
1941 1956 descendants of any of the bases and ancestors of any of the heads.
1942 1957 Return a chunkbuffer object whose read() method will return
1943 1958 successive changegroup chunks.
1944 1959
1945 1960 It is fairly complex as determining which filenodes and which
1946 1961 manifest nodes need to be included for the changeset to be complete
1947 1962 is non-trivial.
1948 1963
1949 1964 Another wrinkle is doing the reverse, figuring out which changeset in
1950 1965 the changegroup a particular filenode or manifestnode belongs to.
1951 1966 """
1952 1967 cl = self.changelog
1953 1968 if not bases:
1954 1969 bases = [nullid]
1955 1970 csets, bases, heads = cl.nodesbetween(bases, heads)
1956 1971 # We assume that all ancestors of bases are known
1957 1972 common = cl.ancestors([cl.rev(n) for n in bases])
1958 1973 return self._changegroupsubset(common, csets, heads, source)
1959 1974
1960 1975 def getlocalbundle(self, source, outgoing):
1961 1976 """Like getbundle, but taking a discovery.outgoing as an argument.
1962 1977
1963 1978 This is only implemented for local repos and reuses potentially
1964 1979 precomputed sets in outgoing."""
1965 1980 if not outgoing.missing:
1966 1981 return None
1967 1982 return self._changegroupsubset(outgoing.common,
1968 1983 outgoing.missing,
1969 1984 outgoing.missingheads,
1970 1985 source)
1971 1986
1972 1987 def getbundle(self, source, heads=None, common=None):
1973 1988 """Like changegroupsubset, but returns the set difference between the
1974 1989 ancestors of heads and the ancestors common.
1975 1990
1976 1991 If heads is None, use the local heads. If common is None, use [nullid].
1977 1992
1978 1993 The nodes in common might not all be known locally due to the way the
1979 1994 current discovery protocol works.
1980 1995 """
1981 1996 cl = self.changelog
1982 1997 if common:
1983 1998 hasnode = cl.hasnode
1984 1999 common = [n for n in common if hasnode(n)]
1985 2000 else:
1986 2001 common = [nullid]
1987 2002 if not heads:
1988 2003 heads = cl.heads()
1989 2004 return self.getlocalbundle(source,
1990 2005 discovery.outgoing(cl, common, heads))
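# Illustrative sketch, not part of the source: getbundle() is the entry point
# used on behalf of the wire protocol; locally it simply wraps
# getlocalbundle() with a discovery.outgoing built from explicit heads/common
# sets.  The chunk size below is an arbitrary choice for the example.
#
#     cg = repo.getbundle('serve')       # defaults: local heads, common=[nullid]
#     if cg is not None:
#         while True:
#             chunk = cg.read(32768)     # read() returns changegroup data
#             if not chunk:
#                 break
#             # forward the chunk to a client or append it to a bundle file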
1991 2006
1992 2007 @unfilteredmethod
1993 2008 def _changegroupsubset(self, commonrevs, csets, heads, source):
1994 2009
1995 2010 cl = self.changelog
1996 2011 mf = self.manifest
1997 2012 mfs = {} # needed manifests
1998 2013 fnodes = {} # needed file nodes
1999 2014 changedfiles = set()
2000 2015 fstate = ['', {}]
2001 2016 count = [0, 0]
2002 2017
2003 2018 # can we go through the fast path ?
2004 2019 heads.sort()
2005 2020 if heads == sorted(self.heads()):
2006 2021 return self._changegroup(csets, source)
2007 2022
2008 2023 # slow path
2009 2024 self.hook('preoutgoing', throw=True, source=source)
2010 2025 self.changegroupinfo(csets, source)
2011 2026
2012 2027 # filter any nodes that claim to be part of the known set
2013 2028 def prune(revlog, missing):
2014 2029 rr, rl = revlog.rev, revlog.linkrev
2015 2030 return [n for n in missing
2016 2031 if rl(rr(n)) not in commonrevs]
2017 2032
2018 2033 progress = self.ui.progress
2019 2034 _bundling = _('bundling')
2020 2035 _changesets = _('changesets')
2021 2036 _manifests = _('manifests')
2022 2037 _files = _('files')
2023 2038
2024 2039 def lookup(revlog, x):
2025 2040 if revlog == cl:
2026 2041 c = cl.read(x)
2027 2042 changedfiles.update(c[3])
2028 2043 mfs.setdefault(c[0], x)
2029 2044 count[0] += 1
2030 2045 progress(_bundling, count[0],
2031 2046 unit=_changesets, total=count[1])
2032 2047 return x
2033 2048 elif revlog == mf:
2034 2049 clnode = mfs[x]
2035 2050 mdata = mf.readfast(x)
2036 2051 for f, n in mdata.iteritems():
2037 2052 if f in changedfiles:
2038 2053 fnodes[f].setdefault(n, clnode)
2039 2054 count[0] += 1
2040 2055 progress(_bundling, count[0],
2041 2056 unit=_manifests, total=count[1])
2042 2057 return clnode
2043 2058 else:
2044 2059 progress(_bundling, count[0], item=fstate[0],
2045 2060 unit=_files, total=count[1])
2046 2061 return fstate[1][x]
2047 2062
2048 2063 bundler = changegroup.bundle10(lookup)
2049 2064 reorder = self.ui.config('bundle', 'reorder', 'auto')
2050 2065 if reorder == 'auto':
2051 2066 reorder = None
2052 2067 else:
2053 2068 reorder = util.parsebool(reorder)
2054 2069
2055 2070 def gengroup():
2056 2071 # Create a changenode group generator that will call our functions
2057 2072 # back to lookup the owning changenode and collect information.
2058 2073 count[:] = [0, len(csets)]
2059 2074 for chunk in cl.group(csets, bundler, reorder=reorder):
2060 2075 yield chunk
2061 2076 progress(_bundling, None)
2062 2077
2063 2078 # Create a generator for the manifestnodes that calls our lookup
2064 2079 # and data collection functions back.
2065 2080 for f in changedfiles:
2066 2081 fnodes[f] = {}
2067 2082 count[:] = [0, len(mfs)]
2068 2083 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2069 2084 yield chunk
2070 2085 progress(_bundling, None)
2071 2086
2072 2087 mfs.clear()
2073 2088
2074 2089 # Go through all our files in order sorted by name.
2075 2090 count[:] = [0, len(changedfiles)]
2076 2091 for fname in sorted(changedfiles):
2077 2092 filerevlog = self.file(fname)
2078 2093 if not len(filerevlog):
2079 2094 raise util.Abort(_("empty or missing revlog for %s")
2080 2095 % fname)
2081 2096 fstate[0] = fname
2082 2097 fstate[1] = fnodes.pop(fname, {})
2083 2098
2084 2099 nodelist = prune(filerevlog, fstate[1])
2085 2100 if nodelist:
2086 2101 count[0] += 1
2087 2102 yield bundler.fileheader(fname)
2088 2103 for chunk in filerevlog.group(nodelist, bundler, reorder):
2089 2104 yield chunk
2090 2105
2091 2106 # Signal that no more groups are left.
2092 2107 yield bundler.close()
2093 2108 progress(_bundling, None)
2094 2109
2095 2110 if csets:
2096 2111 self.hook('outgoing', node=hex(csets[0]), source=source)
2097 2112
2098 2113 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2099 2114
2100 2115 def changegroup(self, basenodes, source):
2101 2116 # to avoid a race we use changegroupsubset() (issue1320)
2102 2117 return self.changegroupsubset(basenodes, self.heads(), source)
2103 2118
2104 2119 @unfilteredmethod
2105 2120 def _changegroup(self, nodes, source):
2106 2121 """Compute the changegroup of all nodes that we have that a recipient
2107 2122 doesn't. Return a chunkbuffer object whose read() method will return
2108 2123 successive changegroup chunks.
2109 2124
2110 2125 This is much easier than the previous function as we can assume that
2111 2126 the recipient has any changenode we aren't sending them.
2112 2127
2113 2128 nodes is the set of nodes to send"""
2114 2129
2115 2130 cl = self.changelog
2116 2131 mf = self.manifest
2117 2132 mfs = {}
2118 2133 changedfiles = set()
2119 2134 fstate = ['']
2120 2135 count = [0, 0]
2121 2136
2122 2137 self.hook('preoutgoing', throw=True, source=source)
2123 2138 self.changegroupinfo(nodes, source)
2124 2139
2125 2140 revset = set([cl.rev(n) for n in nodes])
2126 2141
2127 2142 def gennodelst(log):
2128 2143 ln, llr = log.node, log.linkrev
2129 2144 return [ln(r) for r in log if llr(r) in revset]
2130 2145
2131 2146 progress = self.ui.progress
2132 2147 _bundling = _('bundling')
2133 2148 _changesets = _('changesets')
2134 2149 _manifests = _('manifests')
2135 2150 _files = _('files')
2136 2151
2137 2152 def lookup(revlog, x):
2138 2153 if revlog == cl:
2139 2154 c = cl.read(x)
2140 2155 changedfiles.update(c[3])
2141 2156 mfs.setdefault(c[0], x)
2142 2157 count[0] += 1
2143 2158 progress(_bundling, count[0],
2144 2159 unit=_changesets, total=count[1])
2145 2160 return x
2146 2161 elif revlog == mf:
2147 2162 count[0] += 1
2148 2163 progress(_bundling, count[0],
2149 2164 unit=_manifests, total=count[1])
2150 2165 return cl.node(revlog.linkrev(revlog.rev(x)))
2151 2166 else:
2152 2167 progress(_bundling, count[0], item=fstate[0],
2153 2168 total=count[1], unit=_files)
2154 2169 return cl.node(revlog.linkrev(revlog.rev(x)))
2155 2170
2156 2171 bundler = changegroup.bundle10(lookup)
2157 2172 reorder = self.ui.config('bundle', 'reorder', 'auto')
2158 2173 if reorder == 'auto':
2159 2174 reorder = None
2160 2175 else:
2161 2176 reorder = util.parsebool(reorder)
2162 2177
2163 2178 def gengroup():
2164 2179 '''yield a sequence of changegroup chunks (strings)'''
2165 2180 # construct a list of all changed files
2166 2181
2167 2182 count[:] = [0, len(nodes)]
2168 2183 for chunk in cl.group(nodes, bundler, reorder=reorder):
2169 2184 yield chunk
2170 2185 progress(_bundling, None)
2171 2186
2172 2187 count[:] = [0, len(mfs)]
2173 2188 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2174 2189 yield chunk
2175 2190 progress(_bundling, None)
2176 2191
2177 2192 count[:] = [0, len(changedfiles)]
2178 2193 for fname in sorted(changedfiles):
2179 2194 filerevlog = self.file(fname)
2180 2195 if not len(filerevlog):
2181 2196 raise util.Abort(_("empty or missing revlog for %s")
2182 2197 % fname)
2183 2198 fstate[0] = fname
2184 2199 nodelist = gennodelst(filerevlog)
2185 2200 if nodelist:
2186 2201 count[0] += 1
2187 2202 yield bundler.fileheader(fname)
2188 2203 for chunk in filerevlog.group(nodelist, bundler, reorder):
2189 2204 yield chunk
2190 2205 yield bundler.close()
2191 2206 progress(_bundling, None)
2192 2207
2193 2208 if nodes:
2194 2209 self.hook('outgoing', node=hex(nodes[0]), source=source)
2195 2210
2196 2211 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2197 2212
2198 2213 @unfilteredmethod
2199 2214 def addchangegroup(self, source, srctype, url, emptyok=False):
2200 2215 """Add the changegroup returned by source.read() to this repo.
2201 2216 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2202 2217 the URL of the repo where this changegroup is coming from.
2203 2218
2204 2219 Return an integer summarizing the change to this repo:
2205 2220 - nothing changed or no source: 0
2206 2221 - more heads than before: 1+added heads (2..n)
2207 2222 - fewer heads than before: -1-removed heads (-2..-n)
2208 2223 - number of heads stays the same: 1
2209 2224 """
2210 2225 def csmap(x):
2211 2226 self.ui.debug("add changeset %s\n" % short(x))
2212 2227 return len(cl)
2213 2228
2214 2229 def revmap(x):
2215 2230 return cl.rev(x)
2216 2231
2217 2232 if not source:
2218 2233 return 0
2219 2234
2220 2235 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2221 2236
2222 2237 changesets = files = revisions = 0
2223 2238 efiles = set()
2224 2239
2225 2240 # write changelog data to temp files so concurrent readers will not see
2226 2241 # inconsistent view
2227 2242 cl = self.changelog
2228 2243 cl.delayupdate()
2229 2244 oldheads = cl.heads()
2230 2245
2231 2246 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2232 2247 try:
2233 2248 trp = weakref.proxy(tr)
2234 2249 # pull off the changeset group
2235 2250 self.ui.status(_("adding changesets\n"))
2236 2251 clstart = len(cl)
2237 2252 class prog(object):
2238 2253 step = _('changesets')
2239 2254 count = 1
2240 2255 ui = self.ui
2241 2256 total = None
2242 2257 def __call__(self):
2243 2258 self.ui.progress(self.step, self.count, unit=_('chunks'),
2244 2259 total=self.total)
2245 2260 self.count += 1
2246 2261 pr = prog()
2247 2262 source.callback = pr
2248 2263
2249 2264 source.changelogheader()
2250 2265 srccontent = cl.addgroup(source, csmap, trp)
2251 2266 if not (srccontent or emptyok):
2252 2267 raise util.Abort(_("received changelog group is empty"))
2253 2268 clend = len(cl)
2254 2269 changesets = clend - clstart
2255 2270 for c in xrange(clstart, clend):
2256 2271 efiles.update(self[c].files())
2257 2272 efiles = len(efiles)
2258 2273 self.ui.progress(_('changesets'), None)
2259 2274
2260 2275 # pull off the manifest group
2261 2276 self.ui.status(_("adding manifests\n"))
2262 2277 pr.step = _('manifests')
2263 2278 pr.count = 1
2264 2279 pr.total = changesets # manifests <= changesets
2265 2280 # no need to check for empty manifest group here:
2266 2281 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2267 2282 # no new manifest will be created and the manifest group will
2268 2283 # be empty during the pull
2269 2284 source.manifestheader()
2270 2285 self.manifest.addgroup(source, revmap, trp)
2271 2286 self.ui.progress(_('manifests'), None)
2272 2287
2273 2288 needfiles = {}
2274 2289 if self.ui.configbool('server', 'validate', default=False):
2275 2290 # validate incoming csets have their manifests
2276 2291 for cset in xrange(clstart, clend):
2277 2292 mfest = self.changelog.read(self.changelog.node(cset))[0]
2278 2293 mfest = self.manifest.readdelta(mfest)
2279 2294 # store file nodes we must see
2280 2295 for f, n in mfest.iteritems():
2281 2296 needfiles.setdefault(f, set()).add(n)
2282 2297
2283 2298 # process the files
2284 2299 self.ui.status(_("adding file changes\n"))
2285 2300 pr.step = _('files')
2286 2301 pr.count = 1
2287 2302 pr.total = efiles
2288 2303 source.callback = None
2289 2304
2290 2305 while True:
2291 2306 chunkdata = source.filelogheader()
2292 2307 if not chunkdata:
2293 2308 break
2294 2309 f = chunkdata["filename"]
2295 2310 self.ui.debug("adding %s revisions\n" % f)
2296 2311 pr()
2297 2312 fl = self.file(f)
2298 2313 o = len(fl)
2299 2314 if not fl.addgroup(source, revmap, trp):
2300 2315 raise util.Abort(_("received file revlog group is empty"))
2301 2316 revisions += len(fl) - o
2302 2317 files += 1
2303 2318 if f in needfiles:
2304 2319 needs = needfiles[f]
2305 2320 for new in xrange(o, len(fl)):
2306 2321 n = fl.node(new)
2307 2322 if n in needs:
2308 2323 needs.remove(n)
2309 2324 if not needs:
2310 2325 del needfiles[f]
2311 2326 self.ui.progress(_('files'), None)
2312 2327
2313 2328 for f, needs in needfiles.iteritems():
2314 2329 fl = self.file(f)
2315 2330 for n in needs:
2316 2331 try:
2317 2332 fl.rev(n)
2318 2333 except error.LookupError:
2319 2334 raise util.Abort(
2320 2335 _('missing file data for %s:%s - run hg verify') %
2321 2336 (f, hex(n)))
2322 2337
2323 2338 dh = 0
2324 2339 if oldheads:
2325 2340 heads = cl.heads()
2326 2341 dh = len(heads) - len(oldheads)
2327 2342 for h in heads:
2328 2343 if h not in oldheads and self[h].closesbranch():
2329 2344 dh -= 1
2330 2345 htext = ""
2331 2346 if dh:
2332 2347 htext = _(" (%+d heads)") % dh
2333 2348
2334 2349 self.ui.status(_("added %d changesets"
2335 2350 " with %d changes to %d files%s\n")
2336 2351 % (changesets, revisions, files, htext))
2337 2352 self.invalidatevolatilesets()
2338 2353
2339 2354 if changesets > 0:
2340 2355 p = lambda: cl.writepending() and self.root or ""
2341 2356 self.hook('pretxnchangegroup', throw=True,
2342 2357 node=hex(cl.node(clstart)), source=srctype,
2343 2358 url=url, pending=p)
2344 2359
2345 2360 added = [cl.node(r) for r in xrange(clstart, clend)]
2346 2361 publishing = self.ui.configbool('phases', 'publish', True)
2347 2362 if srctype == 'push':
2348 2363                     # Old servers can not push the boundary themselves.
2349 2364                     # New servers won't push the boundary if the changeset already
2350 2365                     # existed locally as secret.
2351 2366 #
2352 2367                     # We should not use added here but the list of all changes in
2353 2368 # the bundle
2354 2369 if publishing:
2355 2370 phases.advanceboundary(self, phases.public, srccontent)
2356 2371 else:
2357 2372 phases.advanceboundary(self, phases.draft, srccontent)
2358 2373 phases.retractboundary(self, phases.draft, added)
2359 2374 elif srctype != 'strip':
2360 2375                     # publishing only alters behavior during push
2361 2376 #
2362 2377 # strip should not touch boundary at all
2363 2378 phases.retractboundary(self, phases.draft, added)
2364 2379
2365 2380 # make changelog see real files again
2366 2381 cl.finalize(trp)
2367 2382
2368 2383 tr.close()
2369 2384
2370 2385 if changesets > 0:
2371 2386 if srctype != 'strip':
2372 2387                     # During strip, branchcache is invalid but the coming call to
2373 2388                     # `destroyed` will repair it.
2374 2389                     # In other cases we can safely update the cache on disk.
2375 2390 branchmap.updatecache(self)
2376 2391 def runhooks():
2377 2392 # forcefully update the on-disk branch cache
2378 2393 self.ui.debug("updating the branch cache\n")
2379 2394 self.hook("changegroup", node=hex(cl.node(clstart)),
2380 2395 source=srctype, url=url)
2381 2396
2382 2397 for n in added:
2383 2398 self.hook("incoming", node=hex(n), source=srctype,
2384 2399 url=url)
2385 2400 self._afterlock(runhooks)
2386 2401
2387 2402 finally:
2388 2403 tr.release()
2389 2404 # never return 0 here:
2390 2405 if dh < 0:
2391 2406 return dh - 1
2392 2407 else:
2393 2408 return dh + 1
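# Illustrative sketch, not part of the source: the docstring above encodes
# the head-count change so that 0 stays reserved for "nothing changed".
# A hypothetical helper turning the return value back into a plain delta:
#
#     def headdelta(ret):
#         if ret == 0:
#             return None                 # nothing changed or no source
#         return ret - 1 if ret > 0 else ret + 1
#
#     headdelta(1)    # ->  0  (head count unchanged)
#     headdelta(3)    # -> +2  heads added
#     headdelta(-2)   # -> -1  head removed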
2394 2409
2395 2410 def stream_in(self, remote, requirements):
2396 2411 lock = self.lock()
2397 2412 try:
2398 2413 # Save remote branchmap. We will use it later
2399 2414 # to speed up branchcache creation
2400 2415 rbranchmap = None
2401 2416 if remote.capable("branchmap"):
2402 2417 rbranchmap = remote.branchmap()
2403 2418
2404 2419 fp = remote.stream_out()
2405 2420 l = fp.readline()
2406 2421 try:
2407 2422 resp = int(l)
2408 2423 except ValueError:
2409 2424 raise error.ResponseError(
2410 2425 _('unexpected response from remote server:'), l)
2411 2426 if resp == 1:
2412 2427 raise util.Abort(_('operation forbidden by server'))
2413 2428 elif resp == 2:
2414 2429 raise util.Abort(_('locking the remote repository failed'))
2415 2430 elif resp != 0:
2416 2431 raise util.Abort(_('the server sent an unknown error code'))
2417 2432 self.ui.status(_('streaming all changes\n'))
2418 2433 l = fp.readline()
2419 2434 try:
2420 2435 total_files, total_bytes = map(int, l.split(' ', 1))
2421 2436 except (ValueError, TypeError):
2422 2437 raise error.ResponseError(
2423 2438 _('unexpected response from remote server:'), l)
2424 2439 self.ui.status(_('%d files to transfer, %s of data\n') %
2425 2440 (total_files, util.bytecount(total_bytes)))
2426 2441 handled_bytes = 0
2427 2442 self.ui.progress(_('clone'), 0, total=total_bytes)
2428 2443 start = time.time()
2429 2444 for i in xrange(total_files):
2430 2445 # XXX doesn't support '\n' or '\r' in filenames
2431 2446 l = fp.readline()
2432 2447 try:
2433 2448 name, size = l.split('\0', 1)
2434 2449 size = int(size)
2435 2450 except (ValueError, TypeError):
2436 2451 raise error.ResponseError(
2437 2452 _('unexpected response from remote server:'), l)
2438 2453 if self.ui.debugflag:
2439 2454 self.ui.debug('adding %s (%s)\n' %
2440 2455 (name, util.bytecount(size)))
2441 2456 # for backwards compat, name was partially encoded
2442 2457 ofp = self.sopener(store.decodedir(name), 'w')
2443 2458 for chunk in util.filechunkiter(fp, limit=size):
2444 2459 handled_bytes += len(chunk)
2445 2460 self.ui.progress(_('clone'), handled_bytes,
2446 2461 total=total_bytes)
2447 2462 ofp.write(chunk)
2448 2463 ofp.close()
2449 2464 elapsed = time.time() - start
2450 2465 if elapsed <= 0:
2451 2466 elapsed = 0.001
2452 2467 self.ui.progress(_('clone'), None)
2453 2468 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2454 2469 (util.bytecount(total_bytes), elapsed,
2455 2470 util.bytecount(total_bytes / elapsed)))
2456 2471
2457 2472 # new requirements = old non-format requirements +
2458 2473 # new format-related
2459 2474 # requirements from the streamed-in repository
2460 2475 requirements.update(set(self.requirements) - self.supportedformats)
2461 2476 self._applyrequirements(requirements)
2462 2477 self._writerequirements()
2463 2478
2464 2479 if rbranchmap:
2465 2480 rbheads = []
2466 2481 for bheads in rbranchmap.itervalues():
2467 2482 rbheads.extend(bheads)
2468 2483
2469 2484 if rbheads:
2470 2485 rtiprev = max((int(self.changelog.rev(node))
2471 2486 for node in rbheads))
2472 2487 cache = branchmap.branchcache(rbranchmap,
2473 2488 self[rtiprev].node(),
2474 2489 rtiprev)
2475 2490 self._branchcaches[None] = cache
2476 2491 cache.write(self.unfiltered())
2477 2492 self.invalidate()
2478 2493 return len(self.heads()) + 1
2479 2494 finally:
2480 2495 lock.release()
2481 2496
2482 2497 def clone(self, remote, heads=[], stream=False):
2483 2498 '''clone remote repository.
2484 2499
2485 2500 keyword arguments:
2486 2501 heads: list of revs to clone (forces use of pull)
2487 2502 stream: use streaming clone if possible'''
2488 2503
2489 2504 # now, all clients that can request uncompressed clones can
2490 2505 # read repo formats supported by all servers that can serve
2491 2506 # them.
2492 2507
2493 2508 # if revlog format changes, client will have to check version
2494 2509 # and format flags on "stream" capability, and use
2495 2510 # uncompressed only if compatible.
2496 2511
2497 2512 if not stream:
2498 2513 # if the server explicitly prefers to stream (for fast LANs)
2499 2514 stream = remote.capable('stream-preferred')
2500 2515
2501 2516 if stream and not heads:
2502 2517 # 'stream' means remote revlog format is revlogv1 only
2503 2518 if remote.capable('stream'):
2504 2519 return self.stream_in(remote, set(('revlogv1',)))
2505 2520 # otherwise, 'streamreqs' contains the remote revlog format
2506 2521 streamreqs = remote.capable('streamreqs')
2507 2522 if streamreqs:
2508 2523 streamreqs = set(streamreqs.split(','))
2509 2524 # if we support it, stream in and adjust our requirements
2510 2525 if not streamreqs - self.supportedformats:
2511 2526 return self.stream_in(remote, streamreqs)
2512 2527 return self.pull(remote, heads)
2513 2528
2514 2529 def pushkey(self, namespace, key, old, new):
2515 2530 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2516 2531 old=old, new=new)
2517 2532 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2518 2533 ret = pushkey.push(self, namespace, key, old, new)
2519 2534 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2520 2535 ret=ret)
2521 2536 return ret
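# Illustrative sketch, not part of the source: pushkey()/listkeys() implement
# the generic key/value namespaces that the pull/push code above relies on
# for phases, bookmarks and obsolete markers.  The bookmark name and hex
# values are assumptions of the example.
#
#     repo.listkeys('namespaces')    # which pushkey namespaces are supported
#     repo.listkeys('bookmarks')     # e.g. {'stable': '<40-hex changeset id>'}
#     repo.pushkey('bookmarks', 'stable', oldhexnode, newhexnode)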
2522 2537
2523 2538 def listkeys(self, namespace):
2524 2539 self.hook('prelistkeys', throw=True, namespace=namespace)
2525 2540 self.ui.debug('listing keys for "%s"\n' % namespace)
2526 2541 values = pushkey.list(self, namespace)
2527 2542 self.hook('listkeys', namespace=namespace, values=values)
2528 2543 return values
2529 2544
2530 2545 def debugwireargs(self, one, two, three=None, four=None, five=None):
2531 2546 '''used to test argument passing over the wire'''
2532 2547 return "%s %s %s %s %s" % (one, two, three, four, five)
2533 2548
2534 2549 def savecommitmessage(self, text):
2535 2550 fp = self.opener('last-message.txt', 'wb')
2536 2551 try:
2537 2552 fp.write(text)
2538 2553 finally:
2539 2554 fp.close()
2540 2555 return self.pathto(fp.name[len(self.root) + 1:])
2541 2556
2542 2557 # used to avoid circular references so destructors work
2543 2558 def aftertrans(files):
2544 2559 renamefiles = [tuple(t) for t in files]
2545 2560 def a():
2546 2561 for src, dest in renamefiles:
2547 2562 try:
2548 2563 util.rename(src, dest)
2549 2564 except OSError: # journal file does not yet exist
2550 2565 pass
2551 2566 return a
2552 2567
2553 2568 def undoname(fn):
2554 2569 base, name = os.path.split(fn)
2555 2570 assert name.startswith('journal')
2556 2571 return os.path.join(base, name.replace('journal', 'undo', 1))
2557 2572
2558 2573 def instance(ui, path, create):
2559 2574 return localrepository(ui, util.urllocalpath(path), create)
2560 2575
2561 2576 def islocal(path):
2562 2577 return True
@@ -1,203 +1,204
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from mercurial import changegroup, branchmap
10 10 from mercurial.node import short
11 11 from mercurial.i18n import _
12 12 import os
13 13 import errno
14 14
15 15 def _bundle(repo, bases, heads, node, suffix, compress=True):
16 16 """create a bundle with the specified revisions as a backup"""
17 17 cg = repo.changegroupsubset(bases, heads, 'strip')
18 18 backupdir = repo.join("strip-backup")
19 19 if not os.path.isdir(backupdir):
20 20 os.mkdir(backupdir)
21 21 name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
22 22 if compress:
23 23 bundletype = "HG10BZ"
24 24 else:
25 25 bundletype = "HG10UN"
26 26 return changegroup.writebundle(cg, name, bundletype)
27 27
28 28 def _collectfiles(repo, striprev):
29 29 """find out the filelogs affected by the strip"""
30 30 files = set()
31 31
32 32 for x in xrange(striprev, len(repo)):
33 33 files.update(repo[x].files())
34 34
35 35 return sorted(files)
36 36
37 37 def _collectbrokencsets(repo, files, striprev):
38 38 """return the changesets which will be broken by the truncation"""
39 39 s = set()
40 40 def collectone(revlog):
41 41 linkgen = (revlog.linkrev(i) for i in revlog)
42 42 # find the truncation point of the revlog
43 43 for lrev in linkgen:
44 44 if lrev >= striprev:
45 45 break
46 46 # see if any revision after this point has a linkrev
47 47 # less than striprev (those will be broken by strip)
48 48 for lrev in linkgen:
49 49 if lrev < striprev:
50 50 s.add(lrev)
51 51
52 52 collectone(repo.manifest)
53 53 for fname in files:
54 54 collectone(repo.file(fname))
55 55
56 56 return s
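# Illustrative note, not part of the source: strip() truncates each affected
# revlog at the first revision whose linkrev is >= striprev.  Any revision
# above that truncation point whose linkrev is still below striprev belongs
# to a changeset that is not being stripped but would lose manifest or file
# data; _collectbrokencsets() gathers those linkrevs so the changesets can be
# bundled (savebases/saveheads below) and re-applied after the truncation.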
57 57
58 58 def strip(ui, repo, nodelist, backup="all", topic='backup'):
59 59 repo = repo.unfiltered()
60 60 # It simplifies the logic around updating the branchheads cache if we only
61 61 # have to consider the effect of the stripped revisions and not revisions
62 62 # missing because the cache is out-of-date.
63 63 branchmap.updatecache(repo)
64 repo.destroying()
64 65
65 66 cl = repo.changelog
66 67 # TODO handle undo of merge sets
67 68 if isinstance(nodelist, str):
68 69 nodelist = [nodelist]
69 70 striplist = [cl.rev(node) for node in nodelist]
70 71 striprev = min(striplist)
71 72
72 73 # Generate set of branches who will have nodes stripped.
73 74 striprevs = repo.revs("%ld::", striplist)
74 75 stripbranches = set([repo[rev].branch() for rev in striprevs])
75 76
76 77 # Set of potential new heads resulting from the strip. The parents of any
77 78 # node removed could be a new head because the node to be removed could have
78 79 # been the only child of the parent.
79 80 newheadrevs = repo.revs("parents(%ld::) - %ld::", striprevs, striprevs)
80 81 newheadnodes = set([cl.node(rev) for rev in newheadrevs])
81 82 newheadbranches = set([repo[rev].branch() for rev in newheadrevs])
82 83
83 84 keeppartialbundle = backup == 'strip'
84 85
85 86 # Some revisions with rev > striprev may not be descendants of striprev.
86 87 # We have to find these revisions and put them in a bundle, so that
87 88 # we can restore them after the truncations.
88 89 # To create the bundle we use repo.changegroupsubset which requires
89 90 # the list of heads and bases of the set of interesting revisions.
90 91 # (head = revision in the set that has no descendant in the set;
91 92 # base = revision in the set that has no ancestor in the set)
92 93 tostrip = set(striplist)
93 94 for rev in striplist:
94 95 for desc in cl.descendants([rev]):
95 96 tostrip.add(desc)
96 97
97 98 files = _collectfiles(repo, striprev)
98 99 saverevs = _collectbrokencsets(repo, files, striprev)
99 100
100 101 # compute heads
101 102 saveheads = set(saverevs)
102 103 for r in xrange(striprev + 1, len(cl)):
103 104 if r not in tostrip:
104 105 saverevs.add(r)
105 106 saveheads.difference_update(cl.parentrevs(r))
106 107 saveheads.add(r)
107 108 saveheads = [cl.node(r) for r in saveheads]
108 109
109 110 # compute base nodes
110 111 if saverevs:
111 112 descendants = set(cl.descendants(saverevs))
112 113 saverevs.difference_update(descendants)
113 114 savebases = [cl.node(r) for r in saverevs]
114 115 stripbases = [cl.node(r) for r in tostrip]
115 116
116 117 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
117 118 # is much faster
118 119 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
119 120 if newbmtarget:
120 121 newbmtarget = repo[newbmtarget[0]].node()
121 122 else:
122 123 newbmtarget = '.'
123 124
124 125 bm = repo._bookmarks
125 126 updatebm = []
126 127 for m in bm:
127 128 rev = repo[bm[m]].rev()
128 129 if rev in tostrip:
129 130 updatebm.append(m)
130 131
131 132 # create a changegroup for all the branches we need to keep
132 133 backupfile = None
133 134 if backup == "all":
134 135 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
135 136 repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
136 137 if saveheads or savebases:
137 138 # do not compress partial bundle if we remove it from disk later
138 139 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
139 140 compress=keeppartialbundle)
140 141
141 142 mfst = repo.manifest
142 143
143 144 tr = repo.transaction("strip")
144 145 offset = len(tr.entries)
145 146
146 147 try:
147 148 tr.startgroup()
148 149 cl.strip(striprev, tr)
149 150 mfst.strip(striprev, tr)
150 151 for fn in files:
151 152 repo.file(fn).strip(striprev, tr)
152 153 tr.endgroup()
153 154
154 155 try:
155 156 for i in xrange(offset, len(tr.entries)):
156 157 file, troffset, ignore = tr.entries[i]
157 158 repo.sopener(file, 'a').truncate(troffset)
158 159 tr.close()
159 160 except: # re-raises
160 161 tr.abort()
161 162 raise
162 163
163 164 if saveheads or savebases:
164 165 ui.note(_("adding branch\n"))
165 166 f = open(chgrpfile, "rb")
166 167 gen = changegroup.readbundle(f, chgrpfile)
167 168 if not repo.ui.verbose:
168 169 # silence internal shuffling chatter
169 170 repo.ui.pushbuffer()
170 171 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
171 172 if not repo.ui.verbose:
172 173 repo.ui.popbuffer()
173 174 f.close()
174 175 if not keeppartialbundle:
175 176 os.unlink(chgrpfile)
176 177
177 178 # remove undo files
178 179 for undofile in repo.undofiles():
179 180 try:
180 181 os.unlink(undofile)
181 182 except OSError, e:
182 183 if e.errno != errno.ENOENT:
183 184 ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
184 185
185 186 for m in updatebm:
186 187 bm[m] = repo[newbmtarget].node()
187 188 bm.write()
188 189 except: # re-raises
189 190 if backupfile:
190 191 ui.warn(_("strip failed, full bundle stored in '%s'\n")
191 192 % backupfile)
192 193 elif saveheads:
193 194 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
194 195 % chgrpfile)
195 196 raise
196 197
197 198 if len(stripbranches) == 1 and len(newheadbranches) == 1 \
198 199 and stripbranches == newheadbranches:
199 200 repo.destroyed(newheadnodes)
200 201 else:
201 202         # Multiple branches are involved in the strip. Allow the branchcache to
202 203         # become invalid and be rebuilt from scratch later on
203 204 repo.destroyed()
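# Illustrative note, not part of the source beyond what the diff shows: with
# the repo.destroying() call added above, strip() now brackets its
# destructive work as
#
#     repo.destroying()               # new in this changeset, called before
#                                     # any revision is removed
#     # ... truncate changelog, manifest and filelogs, re-apply saved bundle ...
#     repo.destroyed(newheadnodes)    # pre-existing hook, called afterwards
#                                     # (or repo.destroyed() when several
#                                     # branches are involved)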