localrepo: make "undofiles()" return list of tuples "(vfs, relative filename)"...
FUJIWARA Katsunori
r20975:37cdf1fc default
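The change below swaps joined absolute paths for (vfs, relative filename) pairs, letting callers operate on undo files through the vfs that owns them instead of manipulating raw paths. A minimal caller-side sketch of the new shape, assuming a hypothetical repo object (vfs.exists() and vfs.tryread() both appear elsewhere in this file):

    # Illustrative only: consume the new (vfs, relative filename) pairs.
    for vfs, name in repo.undofiles():
        if vfs.exists(name):          # test through the owning vfs
            data = vfs.tryread(name)  # no manual path joining needed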
@@ -1,1885 +1,1885 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock as lockmod
12 12 import transaction, store, encoding, exchange
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 import branchmap, pathutil
20 20 propertycache = util.propertycache
21 21 filecache = scmutil.filecache
22 22
23 23 class repofilecache(filecache):
24 24 """All filecache usage on repo are done for logic that should be unfiltered
25 25 """
26 26
27 27 def __get__(self, repo, type=None):
28 28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 29 def __set__(self, repo, value):
30 30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 31 def __delete__(self, repo):
32 32 return super(repofilecache, self).__delete__(repo.unfiltered())
33 33
34 34 class storecache(repofilecache):
35 35 """filecache for files in the store"""
36 36 def join(self, obj, fname):
37 37 return obj.sjoin(fname)
38 38
39 39 class unfilteredpropertycache(propertycache):
40 40 """propertycache that apply to unfiltered repo only"""
41 41
42 42 def __get__(self, repo, type=None):
43 43 unfi = repo.unfiltered()
44 44 if unfi is repo:
45 45 return super(unfilteredpropertycache, self).__get__(unfi)
46 46 return getattr(unfi, self.name)
47 47
48 48 class filteredpropertycache(propertycache):
49 49 """propertycache that must take filtering in account"""
50 50
51 51 def cachevalue(self, obj, value):
52 52 object.__setattr__(obj, self.name, value)
53 53
54 54
55 55 def hasunfilteredcache(repo, name):
56 56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 57 return name in vars(repo.unfiltered())
58 58
59 59 def unfilteredmethod(orig):
60 60 """decorate method that always need to be run on unfiltered version"""
61 61 def wrapper(repo, *args, **kwargs):
62 62 return orig(repo.unfiltered(), *args, **kwargs)
63 63 return wrapper
64 64
65 65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 66 'bundle2', 'unbundle'))
67 67 legacycaps = moderncaps.union(set(['changegroupsubset']))
68 68
69 69 class localpeer(peer.peerrepository):
70 70 '''peer for a local repo; reflects only the most recent API'''
71 71
72 72 def __init__(self, repo, caps=moderncaps):
73 73 peer.peerrepository.__init__(self)
74 74 self._repo = repo.filtered('served')
75 75 self.ui = repo.ui
76 76 self._caps = repo._restrictcapabilities(caps)
77 77 self.requirements = repo.requirements
78 78 self.supportedformats = repo.supportedformats
79 79
80 80 def close(self):
81 81 self._repo.close()
82 82
83 83 def _capabilities(self):
84 84 return self._caps
85 85
86 86 def local(self):
87 87 return self._repo
88 88
89 89 def canpush(self):
90 90 return True
91 91
92 92 def url(self):
93 93 return self._repo.url()
94 94
95 95 def lookup(self, key):
96 96 return self._repo.lookup(key)
97 97
98 98 def branchmap(self):
99 99 return self._repo.branchmap()
100 100
101 101 def heads(self):
102 102 return self._repo.heads()
103 103
104 104 def known(self, nodes):
105 105 return self._repo.known(nodes)
106 106
107 107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 108 format='HG10'):
109 109 return exchange.getbundle(self._repo, source, heads=heads,
110 110 common=common, bundlecaps=bundlecaps)
111 111
112 112 # TODO We might want to move the next two calls into legacypeer and add
113 113 # unbundle instead.
114 114
115 115 def unbundle(self, cg, heads, url):
116 116 """apply a bundle on a repo
117 117
118 118 This function handles the repo locking itself."""
119 119 try:
120 120 return exchange.unbundle(self._repo, cg, heads, 'push', url)
121 121 except exchange.PushRaced, exc:
122 122 raise error.ResponseError(_('push failed:'), exc.message)
123 123
124 124 def lock(self):
125 125 return self._repo.lock()
126 126
127 127 def addchangegroup(self, cg, source, url):
128 128 return changegroup.addchangegroup(self._repo, cg, source, url)
129 129
130 130 def pushkey(self, namespace, key, old, new):
131 131 return self._repo.pushkey(namespace, key, old, new)
132 132
133 133 def listkeys(self, namespace):
134 134 return self._repo.listkeys(namespace)
135 135
136 136 def debugwireargs(self, one, two, three=None, four=None, five=None):
137 137 '''used to test argument passing over the wire'''
138 138 return "%s %s %s %s %s" % (one, two, three, four, five)
139 139
140 140 class locallegacypeer(localpeer):
141 141 '''peer extension which implements legacy methods too; used for tests with
142 142 restricted capabilities'''
143 143
144 144 def __init__(self, repo):
145 145 localpeer.__init__(self, repo, caps=legacycaps)
146 146
147 147 def branches(self, nodes):
148 148 return self._repo.branches(nodes)
149 149
150 150 def between(self, pairs):
151 151 return self._repo.between(pairs)
152 152
153 153 def changegroup(self, basenodes, source):
154 154 return changegroup.changegroup(self._repo, basenodes, source)
155 155
156 156 def changegroupsubset(self, bases, heads, source):
157 157 return changegroup.changegroupsubset(self._repo, bases, heads, source)
158 158
159 159 class localrepository(object):
160 160
161 161 supportedformats = set(('revlogv1', 'generaldelta'))
162 162 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
163 163 'dotencode'))
164 164 openerreqs = set(('revlogv1', 'generaldelta'))
165 165 requirements = ['revlogv1']
166 166 filtername = None
167 167
168 168 # a list of (ui, featureset) functions.
169 169 # only functions defined in module of enabled extensions are invoked
170 170 featuresetupfuncs = set()
171 171
172 172 def _baserequirements(self, create):
173 173 return self.requirements[:]
174 174
175 175 def __init__(self, baseui, path=None, create=False):
176 176 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
177 177 self.wopener = self.wvfs
178 178 self.root = self.wvfs.base
179 179 self.path = self.wvfs.join(".hg")
180 180 self.origroot = path
181 181 self.auditor = pathutil.pathauditor(self.root, self._checknested)
182 182 self.vfs = scmutil.vfs(self.path)
183 183 self.opener = self.vfs
184 184 self.baseui = baseui
185 185 self.ui = baseui.copy()
186 186 self.ui.copy = baseui.copy # prevent copying repo configuration
187 187 # A list of callbacks to shape the phase if no data were found.
188 188 # Callbacks are in the form: func(repo, roots) --> processed root.
189 189 # This list is to be filled by extensions during repo setup
190 190 self._phasedefaults = []
191 191 try:
192 192 self.ui.readconfig(self.join("hgrc"), self.root)
193 193 extensions.loadall(self.ui)
194 194 except IOError:
195 195 pass
196 196
197 197 if self.featuresetupfuncs:
198 198 self.supported = set(self._basesupported) # use private copy
199 199 extmods = set(m.__name__ for n, m
200 200 in extensions.extensions(self.ui))
201 201 for setupfunc in self.featuresetupfuncs:
202 202 if setupfunc.__module__ in extmods:
203 203 setupfunc(self.ui, self.supported)
204 204 else:
205 205 self.supported = self._basesupported
206 206
207 207 if not self.vfs.isdir():
208 208 if create:
209 209 if not self.wvfs.exists():
210 210 self.wvfs.makedirs()
211 211 self.vfs.makedir(notindexed=True)
212 212 requirements = self._baserequirements(create)
213 213 if self.ui.configbool('format', 'usestore', True):
214 214 self.vfs.mkdir("store")
215 215 requirements.append("store")
216 216 if self.ui.configbool('format', 'usefncache', True):
217 217 requirements.append("fncache")
218 218 if self.ui.configbool('format', 'dotencode', True):
219 219 requirements.append('dotencode')
220 220 # create an invalid changelog
221 221 self.vfs.append(
222 222 "00changelog.i",
223 223 '\0\0\0\2' # represents revlogv2
224 224 ' dummy changelog to prevent using the old repo layout'
225 225 )
226 226 if self.ui.configbool('format', 'generaldelta', False):
227 227 requirements.append("generaldelta")
228 228 requirements = set(requirements)
229 229 else:
230 230 raise error.RepoError(_("repository %s not found") % path)
231 231 elif create:
232 232 raise error.RepoError(_("repository %s already exists") % path)
233 233 else:
234 234 try:
235 235 requirements = scmutil.readrequires(self.vfs, self.supported)
236 236 except IOError, inst:
237 237 if inst.errno != errno.ENOENT:
238 238 raise
239 239 requirements = set()
240 240
241 241 self.sharedpath = self.path
242 242 try:
243 243 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
244 244 realpath=True)
245 245 s = vfs.base
246 246 if not vfs.exists():
247 247 raise error.RepoError(
248 248 _('.hg/sharedpath points to nonexistent directory %s') % s)
249 249 self.sharedpath = s
250 250 except IOError, inst:
251 251 if inst.errno != errno.ENOENT:
252 252 raise
253 253
254 254 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
255 255 self.spath = self.store.path
256 256 self.svfs = self.store.vfs
257 257 self.sopener = self.svfs
258 258 self.sjoin = self.store.join
259 259 self.vfs.createmode = self.store.createmode
260 260 self._applyrequirements(requirements)
261 261 if create:
262 262 self._writerequirements()
263 263
264 264
265 265 self._branchcaches = {}
266 266 self.filterpats = {}
267 267 self._datafilters = {}
268 268 self._transref = self._lockref = self._wlockref = None
269 269
270 270 # A cache for various files under .hg/ that tracks file changes,
271 271 # (used by the filecache decorator)
272 272 #
273 273 # Maps a property name to its util.filecacheentry
274 274 self._filecache = {}
275 275
276 276 # hold sets of revision to be filtered
277 277 # should be cleared when something might have changed the filter value:
278 278 # - new changesets,
279 279 # - phase change,
280 280 # - new obsolescence marker,
281 281 # - working directory parent change,
282 282 # - bookmark changes
283 283 self.filteredrevcache = {}
284 284
285 285 def close(self):
286 286 pass
287 287
288 288 def _restrictcapabilities(self, caps):
289 289 # bundle2 is not ready for prime time, drop it unless explicitly
290 290 # required by the tests (or some brave tester)
291 291 if not self.ui.configbool('server', 'bundle2', False):
292 292 caps = set(caps)
293 293 caps.discard('bundle2')
294 294 return caps
295 295
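As the comment above notes, the bundle2 capability stays hidden unless a plain boolean is set; a sketch of opting in for testing, assuming the standard hgrc syntax read by configbool:

    # hgrc snippet (or pass --config server.bundle2=True on the command line)
    [server]
    bundle2 = True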
296 296 def _applyrequirements(self, requirements):
297 297 self.requirements = requirements
298 298 self.sopener.options = dict((r, 1) for r in requirements
299 299 if r in self.openerreqs)
300 300 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
301 301 if chunkcachesize is not None:
302 302 self.sopener.options['chunkcachesize'] = chunkcachesize
303 303
304 304 def _writerequirements(self):
305 305 reqfile = self.opener("requires", "w")
306 306 for r in sorted(self.requirements):
307 307 reqfile.write("%s\n" % r)
308 308 reqfile.close()
309 309
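The resulting .hg/requires file is just the sorted requirement names, one per line. For a repository created with the defaults above it would read as follows (illustrative; generaldelta appears only when format.generaldelta is enabled):

    dotencode
    fncache
    revlogv1
    store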
310 310 def _checknested(self, path):
311 311 """Determine if path is a legal nested repository."""
312 312 if not path.startswith(self.root):
313 313 return False
314 314 subpath = path[len(self.root) + 1:]
315 315 normsubpath = util.pconvert(subpath)
316 316
317 317 # XXX: Checking against the current working copy is wrong in
318 318 # the sense that it can reject things like
319 319 #
320 320 # $ hg cat -r 10 sub/x.txt
321 321 #
322 322 # if sub/ is no longer a subrepository in the working copy
323 323 # parent revision.
324 324 #
325 325 # However, it can of course also allow things that would have
326 326 # been rejected before, such as the above cat command if sub/
327 327 # is a subrepository now, but was a normal directory before.
328 328 # The old path auditor would have rejected by mistake since it
329 329 # panics when it sees sub/.hg/.
330 330 #
331 331 # All in all, checking against the working copy seems sensible
332 332 # since we want to prevent access to nested repositories on
333 333 # the filesystem *now*.
334 334 ctx = self[None]
335 335 parts = util.splitpath(subpath)
336 336 while parts:
337 337 prefix = '/'.join(parts)
338 338 if prefix in ctx.substate:
339 339 if prefix == normsubpath:
340 340 return True
341 341 else:
342 342 sub = ctx.sub(prefix)
343 343 return sub.checknested(subpath[len(prefix) + 1:])
344 344 else:
345 345 parts.pop()
346 346 return False
347 347
348 348 def peer(self):
349 349 return localpeer(self) # not cached to avoid reference cycle
350 350
351 351 def unfiltered(self):
352 352 """Return unfiltered version of the repository
353 353
354 354 Intended to be overwritten by filtered repo."""
355 355 return self
356 356
357 357 def filtered(self, name):
358 358 """Return a filtered version of a repository"""
359 359 # build a new class with the mixin and the current class
360 360 # (possibly subclass of the repo)
361 361 class proxycls(repoview.repoview, self.unfiltered().__class__):
362 362 pass
363 363 return proxycls(self, name)
364 364
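A short usage note for the two methods above: filter names such as 'visible' and 'served' (both used later in this file) select a repoview subset, while unfiltered() always recovers the underlying repo. Illustrative only, on a hypothetical repo:

    served = repo.filtered('served')   # hides secret and hidden changesets
    assert served.unfiltered() is repo.unfiltered()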
365 365 @repofilecache('bookmarks')
366 366 def _bookmarks(self):
367 367 return bookmarks.bmstore(self)
368 368
369 369 @repofilecache('bookmarks.current')
370 370 def _bookmarkcurrent(self):
371 371 return bookmarks.readcurrent(self)
372 372
373 373 def bookmarkheads(self, bookmark):
374 374 name = bookmark.split('@', 1)[0]
375 375 heads = []
376 376 for mark, n in self._bookmarks.iteritems():
377 377 if mark.split('@', 1)[0] == name:
378 378 heads.append(n)
379 379 return heads
380 380
381 381 @storecache('phaseroots')
382 382 def _phasecache(self):
383 383 return phases.phasecache(self, self._phasedefaults)
384 384
385 385 @storecache('obsstore')
386 386 def obsstore(self):
387 387 store = obsolete.obsstore(self.sopener)
388 388 if store and not obsolete._enabled:
389 389 # message is rare enough to not be translated
390 390 msg = 'obsolete feature not enabled but %i markers found!\n'
391 391 self.ui.warn(msg % len(list(store)))
392 392 return store
393 393
394 394 @storecache('00changelog.i')
395 395 def changelog(self):
396 396 c = changelog.changelog(self.sopener)
397 397 if 'HG_PENDING' in os.environ:
398 398 p = os.environ['HG_PENDING']
399 399 if p.startswith(self.root):
400 400 c.readpending('00changelog.i.a')
401 401 return c
402 402
403 403 @storecache('00manifest.i')
404 404 def manifest(self):
405 405 return manifest.manifest(self.sopener)
406 406
407 407 @repofilecache('dirstate')
408 408 def dirstate(self):
409 409 warned = [0]
410 410 def validate(node):
411 411 try:
412 412 self.changelog.rev(node)
413 413 return node
414 414 except error.LookupError:
415 415 if not warned[0]:
416 416 warned[0] = True
417 417 self.ui.warn(_("warning: ignoring unknown"
418 418 " working parent %s!\n") % short(node))
419 419 return nullid
420 420
421 421 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
422 422
423 423 def __getitem__(self, changeid):
424 424 if changeid is None:
425 425 return context.workingctx(self)
426 426 return context.changectx(self, changeid)
427 427
428 428 def __contains__(self, changeid):
429 429 try:
430 430 return bool(self.lookup(changeid))
431 431 except error.RepoLookupError:
432 432 return False
433 433
434 434 def __nonzero__(self):
435 435 return True
436 436
437 437 def __len__(self):
438 438 return len(self.changelog)
439 439
440 440 def __iter__(self):
441 441 return iter(self.changelog)
442 442
443 443 def revs(self, expr, *args):
444 444 '''Return a list of revisions matching the given revset'''
445 445 expr = revset.formatspec(expr, *args)
446 446 m = revset.match(None, expr)
447 447 return m(self, revset.spanset(self))
448 448
449 449 def set(self, expr, *args):
450 450 '''
451 451 Yield a context for each matching revision, after doing arg
452 452 replacement via revset.formatspec
453 453 '''
454 454 for r in self.revs(expr, *args):
455 455 yield self[r]
456 456
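A small sketch of the two helpers above; revset.formatspec substitutes arguments by type (for example %d for an integer revision, %s for a string), so callers avoid quoting by hand. Illustrative only:

    for rev in repo.revs('branch(%s) and not merge()', 'default'):
        pass  # integer revision numbers
    for ctx in repo.set('ancestors(%d)', 42):
        pass  # changectx objects, in revset order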
457 457 def url(self):
458 458 return 'file:' + self.root
459 459
460 460 def hook(self, name, throw=False, **args):
461 461 return hook.hook(self.ui, self, name, throw, **args)
462 462
463 463 @unfilteredmethod
464 464 def _tag(self, names, node, message, local, user, date, extra={}):
465 465 if isinstance(names, str):
466 466 names = (names,)
467 467
468 468 branches = self.branchmap()
469 469 for name in names:
470 470 self.hook('pretag', throw=True, node=hex(node), tag=name,
471 471 local=local)
472 472 if name in branches:
473 473 self.ui.warn(_("warning: tag %s conflicts with existing"
474 474 " branch name\n") % name)
475 475
476 476 def writetags(fp, names, munge, prevtags):
477 477 fp.seek(0, 2)
478 478 if prevtags and prevtags[-1] != '\n':
479 479 fp.write('\n')
480 480 for name in names:
481 481 m = munge and munge(name) or name
482 482 if (self._tagscache.tagtypes and
483 483 name in self._tagscache.tagtypes):
484 484 old = self.tags().get(name, nullid)
485 485 fp.write('%s %s\n' % (hex(old), m))
486 486 fp.write('%s %s\n' % (hex(node), m))
487 487 fp.close()
488 488
489 489 prevtags = ''
490 490 if local:
491 491 try:
492 492 fp = self.opener('localtags', 'r+')
493 493 except IOError:
494 494 fp = self.opener('localtags', 'a')
495 495 else:
496 496 prevtags = fp.read()
497 497
498 498 # local tags are stored in the current charset
499 499 writetags(fp, names, None, prevtags)
500 500 for name in names:
501 501 self.hook('tag', node=hex(node), tag=name, local=local)
502 502 return
503 503
504 504 try:
505 505 fp = self.wfile('.hgtags', 'rb+')
506 506 except IOError, e:
507 507 if e.errno != errno.ENOENT:
508 508 raise
509 509 fp = self.wfile('.hgtags', 'ab')
510 510 else:
511 511 prevtags = fp.read()
512 512
513 513 # committed tags are stored in UTF-8
514 514 writetags(fp, names, encoding.fromlocal, prevtags)
515 515
516 516 fp.close()
517 517
518 518 self.invalidatecaches()
519 519
520 520 if '.hgtags' not in self.dirstate:
521 521 self[None].add(['.hgtags'])
522 522
523 523 m = matchmod.exact(self.root, '', ['.hgtags'])
524 524 tagnode = self.commit(message, user, date, extra=extra, match=m)
525 525
526 526 for name in names:
527 527 self.hook('tag', node=hex(node), tag=name, local=local)
528 528
529 529 return tagnode
530 530
531 531 def tag(self, names, node, message, local, user, date):
532 532 '''tag a revision with one or more symbolic names.
533 533
534 534 names is a list of strings or, when adding a single tag, names may be a
535 535 string.
536 536
537 537 if local is True, the tags are stored in a per-repository file.
538 538 otherwise, they are stored in the .hgtags file, and a new
539 539 changeset is committed with the change.
540 540
541 541 keyword arguments:
542 542
543 543 local: whether to store tags in non-version-controlled file
544 544 (default False)
545 545
546 546 message: commit message to use if committing
547 547
548 548 user: name of user to use if committing
549 549
550 550 date: date tuple to use if committing'''
551 551
552 552 if not local:
553 553 for x in self.status()[:5]:
554 554 if '.hgtags' in x:
555 555 raise util.Abort(_('working copy of .hgtags is changed '
556 556 '(please commit .hgtags manually)'))
557 557
558 558 self.tags() # instantiate the cache
559 559 self._tag(names, node, message, local, user, date)
560 560
561 561 @filteredpropertycache
562 562 def _tagscache(self):
563 563 '''Returns a tagscache object that contains various tags related
564 564 caches.'''
565 565
566 566 # This simplifies its cache management by having one decorated
567 567 # function (this one) and the rest simply fetch things from it.
568 568 class tagscache(object):
569 569 def __init__(self):
570 570 # These two define the set of tags for this repository. tags
571 571 # maps tag name to node; tagtypes maps tag name to 'global' or
572 572 # 'local'. (Global tags are defined by .hgtags across all
573 573 # heads, and local tags are defined in .hg/localtags.)
574 574 # They constitute the in-memory cache of tags.
575 575 self.tags = self.tagtypes = None
576 576
577 577 self.nodetagscache = self.tagslist = None
578 578
579 579 cache = tagscache()
580 580 cache.tags, cache.tagtypes = self._findtags()
581 581
582 582 return cache
583 583
584 584 def tags(self):
585 585 '''return a mapping of tag to node'''
586 586 t = {}
587 587 if self.changelog.filteredrevs:
588 588 tags, tt = self._findtags()
589 589 else:
590 590 tags = self._tagscache.tags
591 591 for k, v in tags.iteritems():
592 592 try:
593 593 # ignore tags to unknown nodes
594 594 self.changelog.rev(v)
595 595 t[k] = v
596 596 except (error.LookupError, ValueError):
597 597 pass
598 598 return t
599 599
600 600 def _findtags(self):
601 601 '''Do the hard work of finding tags. Return a pair of dicts
602 602 (tags, tagtypes) where tags maps tag name to node, and tagtypes
603 603 maps tag name to a string like \'global\' or \'local\'.
604 604 Subclasses or extensions are free to add their own tags, but
605 605 should be aware that the returned dicts will be retained for the
606 606 duration of the localrepo object.'''
607 607
608 608 # XXX what tagtype should subclasses/extensions use? Currently
609 609 # mq and bookmarks add tags, but do not set the tagtype at all.
610 610 # Should each extension invent its own tag type? Should there
611 611 # be one tagtype for all such "virtual" tags? Or is the status
612 612 # quo fine?
613 613
614 614 alltags = {} # map tag name to (node, hist)
615 615 tagtypes = {}
616 616
617 617 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
618 618 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
619 619
620 620 # Build the return dicts. Have to re-encode tag names because
621 621 # the tags module always uses UTF-8 (in order not to lose info
622 622 # writing to the cache), but the rest of Mercurial wants them in
623 623 # local encoding.
624 624 tags = {}
625 625 for (name, (node, hist)) in alltags.iteritems():
626 626 if node != nullid:
627 627 tags[encoding.tolocal(name)] = node
628 628 tags['tip'] = self.changelog.tip()
629 629 tagtypes = dict([(encoding.tolocal(name), value)
630 630 for (name, value) in tagtypes.iteritems()])
631 631 return (tags, tagtypes)
632 632
633 633 def tagtype(self, tagname):
634 634 '''
635 635 return the type of the given tag. result can be:
636 636
637 637 'local' : a local tag
638 638 'global' : a global tag
639 639 None : tag does not exist
640 640 '''
641 641
642 642 return self._tagscache.tagtypes.get(tagname)
643 643
644 644 def tagslist(self):
645 645 '''return a list of tags ordered by revision'''
646 646 if not self._tagscache.tagslist:
647 647 l = []
648 648 for t, n in self.tags().iteritems():
649 649 r = self.changelog.rev(n)
650 650 l.append((r, t, n))
651 651 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
652 652
653 653 return self._tagscache.tagslist
654 654
655 655 def nodetags(self, node):
656 656 '''return the tags associated with a node'''
657 657 if not self._tagscache.nodetagscache:
658 658 nodetagscache = {}
659 659 for t, n in self._tagscache.tags.iteritems():
660 660 nodetagscache.setdefault(n, []).append(t)
661 661 for tags in nodetagscache.itervalues():
662 662 tags.sort()
663 663 self._tagscache.nodetagscache = nodetagscache
664 664 return self._tagscache.nodetagscache.get(node, [])
665 665
666 666 def nodebookmarks(self, node):
667 667 marks = []
668 668 for bookmark, n in self._bookmarks.iteritems():
669 669 if n == node:
670 670 marks.append(bookmark)
671 671 return sorted(marks)
672 672
673 673 def branchmap(self):
674 674 '''returns a dictionary {branch: [branchheads]} with branchheads
675 675 ordered by increasing revision number'''
676 676 branchmap.updatecache(self)
677 677 return self._branchcaches[self.filtername]
678 678
679 679 def branchtip(self, branch):
680 680 '''return the tip node for a given branch'''
681 681 try:
682 682 return self.branchmap().branchtip(branch)
683 683 except KeyError:
684 684 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
685 685
686 686 def lookup(self, key):
687 687 return self[key].node()
688 688
689 689 def lookupbranch(self, key, remote=None):
690 690 repo = remote or self
691 691 if key in repo.branchmap():
692 692 return key
693 693
694 694 repo = (remote and remote.local()) and remote or self
695 695 return repo[key].branch()
696 696
697 697 def known(self, nodes):
698 698 nm = self.changelog.nodemap
699 699 pc = self._phasecache
700 700 result = []
701 701 for n in nodes:
702 702 r = nm.get(n)
703 703 resp = not (r is None or pc.phase(self, r) >= phases.secret)
704 704 result.append(resp)
705 705 return result
706 706
707 707 def local(self):
708 708 return self
709 709
710 710 def cancopy(self):
711 711 # so statichttprepo's override of local() works
712 712 if not self.local():
713 713 return False
714 714 if not self.ui.configbool('phases', 'publish', True):
715 715 return True
716 716 # if publishing we can't copy if there is filtered content
717 717 return not self.filtered('visible').changelog.filteredrevs
718 718
719 719 def join(self, f):
720 720 return os.path.join(self.path, f)
721 721
722 722 def wjoin(self, f):
723 723 return os.path.join(self.root, f)
724 724
725 725 def file(self, f):
726 726 if f[0] == '/':
727 727 f = f[1:]
728 728 return filelog.filelog(self.sopener, f)
729 729
730 730 def changectx(self, changeid):
731 731 return self[changeid]
732 732
733 733 def parents(self, changeid=None):
734 734 '''get list of changectxs for parents of changeid'''
735 735 return self[changeid].parents()
736 736
737 737 def setparents(self, p1, p2=nullid):
738 738 copies = self.dirstate.setparents(p1, p2)
739 739 pctx = self[p1]
740 740 if copies:
741 741 # Adjust copy records, the dirstate cannot do it, it
742 742 # requires access to parents manifests. Preserve them
743 743 # only for entries added to first parent.
744 744 for f in copies:
745 745 if f not in pctx and copies[f] in pctx:
746 746 self.dirstate.copy(copies[f], f)
747 747 if p2 == nullid:
748 748 for f, s in sorted(self.dirstate.copies().items()):
749 749 if f not in pctx and s not in pctx:
750 750 self.dirstate.copy(None, f)
751 751
752 752 def filectx(self, path, changeid=None, fileid=None):
753 753 """changeid can be a changeset revision, node, or tag.
754 754 fileid can be a file revision or node."""
755 755 return context.filectx(self, path, changeid, fileid)
756 756
757 757 def getcwd(self):
758 758 return self.dirstate.getcwd()
759 759
760 760 def pathto(self, f, cwd=None):
761 761 return self.dirstate.pathto(f, cwd)
762 762
763 763 def wfile(self, f, mode='r'):
764 764 return self.wopener(f, mode)
765 765
766 766 def _link(self, f):
767 767 return self.wvfs.islink(f)
768 768
769 769 def _loadfilter(self, filter):
770 770 if filter not in self.filterpats:
771 771 l = []
772 772 for pat, cmd in self.ui.configitems(filter):
773 773 if cmd == '!':
774 774 continue
775 775 mf = matchmod.match(self.root, '', [pat])
776 776 fn = None
777 777 params = cmd
778 778 for name, filterfn in self._datafilters.iteritems():
779 779 if cmd.startswith(name):
780 780 fn = filterfn
781 781 params = cmd[len(name):].lstrip()
782 782 break
783 783 if not fn:
784 784 fn = lambda s, c, **kwargs: util.filter(s, c)
785 785 # Wrap old filters not supporting keyword arguments
786 786 if not inspect.getargspec(fn)[2]:
787 787 oldfn = fn
788 788 fn = lambda s, c, **kwargs: oldfn(s, c)
789 789 l.append((mf, fn, params))
790 790 self.filterpats[filter] = l
791 791 return self.filterpats[filter]
792 792
793 793 def _filter(self, filterpats, filename, data):
794 794 for mf, fn, cmd in filterpats:
795 795 if mf(filename):
796 796 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
797 797 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
798 798 break
799 799
800 800 return data
801 801
802 802 @unfilteredpropertycache
803 803 def _encodefilterpats(self):
804 804 return self._loadfilter('encode')
805 805
806 806 @unfilteredpropertycache
807 807 def _decodefilterpats(self):
808 808 return self._loadfilter('decode')
809 809
810 810 def adddatafilter(self, name, filter):
811 811 self._datafilters[name] = filter
812 812
813 813 def wread(self, filename):
814 814 if self._link(filename):
815 815 data = self.wvfs.readlink(filename)
816 816 else:
817 817 data = self.wopener.read(filename)
818 818 return self._filter(self._encodefilterpats, filename, data)
819 819
820 820 def wwrite(self, filename, data, flags):
821 821 data = self._filter(self._decodefilterpats, filename, data)
822 822 if 'l' in flags:
823 823 self.wopener.symlink(data, filename)
824 824 else:
825 825 self.wopener.write(filename, data)
826 826 if 'x' in flags:
827 827 self.wvfs.setflags(filename, False, True)
828 828
829 829 def wwritedata(self, filename, data):
830 830 return self._filter(self._decodefilterpats, filename, data)
831 831
832 832 def transaction(self, desc, report=None):
833 833 tr = self._transref and self._transref() or None
834 834 if tr and tr.running():
835 835 return tr.nest()
836 836
837 837 # abort here if the journal already exists
838 838 if self.svfs.exists("journal"):
839 839 raise error.RepoError(
840 840 _("abandoned transaction found - run hg recover"))
841 841
842 842 def onclose():
843 843 self.store.write(tr)
844 844
845 845 self._writejournal(desc)
846 846 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
847 847 rp = report and report or self.ui.warn
848 848 tr = transaction.transaction(rp, self.sopener,
849 849 "journal",
850 850 aftertrans(renames),
851 851 self.store.createmode,
852 852 onclose)
853 853 self._transref = weakref.ref(tr)
854 854 return tr
855 855
856 856 def _journalfiles(self):
857 857 return ((self.svfs, 'journal'),
858 858 (self.vfs, 'journal.dirstate'),
859 859 (self.vfs, 'journal.branch'),
860 860 (self.vfs, 'journal.desc'),
861 861 (self.vfs, 'journal.bookmarks'),
862 862 (self.svfs, 'journal.phaseroots'))
863 863
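For context, undoname() (defined later in this module, outside this hunk) renames each journal file to its undo counterpart; roughly, as a sketch of this era's behavior rather than a verbatim copy:

    import os
    def undoname_sketch(fn):              # journal.dirstate -> undo.dirstate
        base, name = os.path.split(fn)
        assert name.startswith('journal')
        return os.path.join(base, name.replace('journal', 'undo', 1))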
864 864 def undofiles(self):
865 - return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
865 + return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
866 866
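The net effect of the change above, shown as illustrative return shapes (not literal output):

    # before: joined absolute paths
    #   ['/repo/.hg/store/undo', '/repo/.hg/undo.dirstate', ...]
    # after: (vfs, relative filename) pairs
    #   [(svfs, 'undo'), (vfs, 'undo.dirstate'), ...]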
867 867 def _writejournal(self, desc):
868 868 self.opener.write("journal.dirstate",
869 869 self.opener.tryread("dirstate"))
870 870 self.opener.write("journal.branch",
871 871 encoding.fromlocal(self.dirstate.branch()))
872 872 self.opener.write("journal.desc",
873 873 "%d\n%s\n" % (len(self), desc))
874 874 self.opener.write("journal.bookmarks",
875 875 self.opener.tryread("bookmarks"))
876 876 self.sopener.write("journal.phaseroots",
877 877 self.sopener.tryread("phaseroots"))
878 878
879 879 def recover(self):
880 880 lock = self.lock()
881 881 try:
882 882 if self.svfs.exists("journal"):
883 883 self.ui.status(_("rolling back interrupted transaction\n"))
884 884 transaction.rollback(self.sopener, "journal",
885 885 self.ui.warn)
886 886 self.invalidate()
887 887 return True
888 888 else:
889 889 self.ui.warn(_("no interrupted transaction available\n"))
890 890 return False
891 891 finally:
892 892 lock.release()
893 893
894 894 def rollback(self, dryrun=False, force=False):
895 895 wlock = lock = None
896 896 try:
897 897 wlock = self.wlock()
898 898 lock = self.lock()
899 899 if self.svfs.exists("undo"):
900 900 return self._rollback(dryrun, force)
901 901 else:
902 902 self.ui.warn(_("no rollback information available\n"))
903 903 return 1
904 904 finally:
905 905 release(lock, wlock)
906 906
907 907 @unfilteredmethod # Until we get smarter cache management
908 908 def _rollback(self, dryrun, force):
909 909 ui = self.ui
910 910 try:
911 911 args = self.opener.read('undo.desc').splitlines()
912 912 (oldlen, desc, detail) = (int(args[0]), args[1], None)
913 913 if len(args) >= 3:
914 914 detail = args[2]
915 915 oldtip = oldlen - 1
916 916
917 917 if detail and ui.verbose:
918 918 msg = (_('repository tip rolled back to revision %s'
919 919 ' (undo %s: %s)\n')
920 920 % (oldtip, desc, detail))
921 921 else:
922 922 msg = (_('repository tip rolled back to revision %s'
923 923 ' (undo %s)\n')
924 924 % (oldtip, desc))
925 925 except IOError:
926 926 msg = _('rolling back unknown transaction\n')
927 927 desc = None
928 928
929 929 if not force and self['.'] != self['tip'] and desc == 'commit':
930 930 raise util.Abort(
931 931 _('rollback of last commit while not checked out '
932 932 'may lose data'), hint=_('use -f to force'))
933 933
934 934 ui.status(msg)
935 935 if dryrun:
936 936 return 0
937 937
938 938 parents = self.dirstate.parents()
939 939 self.destroying()
940 940 transaction.rollback(self.sopener, 'undo', ui.warn)
941 941 if self.vfs.exists('undo.bookmarks'):
942 942 self.vfs.rename('undo.bookmarks', 'bookmarks')
943 943 if self.svfs.exists('undo.phaseroots'):
944 944 self.svfs.rename('undo.phaseroots', 'phaseroots')
945 945 self.invalidate()
946 946
947 947 parentgone = (parents[0] not in self.changelog.nodemap or
948 948 parents[1] not in self.changelog.nodemap)
949 949 if parentgone:
950 950 self.vfs.rename('undo.dirstate', 'dirstate')
951 951 try:
952 952 branch = self.opener.read('undo.branch')
953 953 self.dirstate.setbranch(encoding.tolocal(branch))
954 954 except IOError:
955 955 ui.warn(_('named branch could not be reset: '
956 956 'current branch is still \'%s\'\n')
957 957 % self.dirstate.branch())
958 958
959 959 self.dirstate.invalidate()
960 960 parents = tuple([p.rev() for p in self.parents()])
961 961 if len(parents) > 1:
962 962 ui.status(_('working directory now based on '
963 963 'revisions %d and %d\n') % parents)
964 964 else:
965 965 ui.status(_('working directory now based on '
966 966 'revision %d\n') % parents)
967 967 # TODO: if we know which new heads may result from this rollback, pass
968 968 # them to destroy(), which will prevent the branchhead cache from being
969 969 # invalidated.
970 970 self.destroyed()
971 971 return 0
972 972
973 973 def invalidatecaches(self):
974 974
975 975 if '_tagscache' in vars(self):
976 976 # can't use delattr on proxy
977 977 del self.__dict__['_tagscache']
978 978
979 979 self.unfiltered()._branchcaches.clear()
980 980 self.invalidatevolatilesets()
981 981
982 982 def invalidatevolatilesets(self):
983 983 self.filteredrevcache.clear()
984 984 obsolete.clearobscaches(self)
985 985
986 986 def invalidatedirstate(self):
987 987 '''Invalidates the dirstate, causing the next call to dirstate
988 988 to check if it was modified since the last time it was read,
989 989 rereading it if it has.
990 990
991 991 This is different from dirstate.invalidate() in that it doesn't
992 992 always reread the dirstate. Use dirstate.invalidate() if you want to
993 993 explicitly read the dirstate again (i.e. restoring it to a previous
994 994 known good state).'''
995 995 if hasunfilteredcache(self, 'dirstate'):
996 996 for k in self.dirstate._filecache:
997 997 try:
998 998 delattr(self.dirstate, k)
999 999 except AttributeError:
1000 1000 pass
1001 1001 delattr(self.unfiltered(), 'dirstate')
1002 1002
1003 1003 def invalidate(self):
1004 1004 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1005 1005 for k in self._filecache:
1006 1006 # dirstate is invalidated separately in invalidatedirstate()
1007 1007 if k == 'dirstate':
1008 1008 continue
1009 1009
1010 1010 try:
1011 1011 delattr(unfiltered, k)
1012 1012 except AttributeError:
1013 1013 pass
1014 1014 self.invalidatecaches()
1015 1015 self.store.invalidatecaches()
1016 1016
1017 1017 def invalidateall(self):
1018 1018 '''Fully invalidates both store and non-store parts, causing the
1019 1019 subsequent operation to reread any outside changes.'''
1020 1020 # extension should hook this to invalidate its caches
1021 1021 self.invalidate()
1022 1022 self.invalidatedirstate()
1023 1023
1024 1024 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1025 1025 try:
1026 1026 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1027 1027 except error.LockHeld, inst:
1028 1028 if not wait:
1029 1029 raise
1030 1030 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1031 1031 (desc, inst.locker))
1032 1032 # default to 600 seconds timeout
1033 1033 l = lockmod.lock(vfs, lockname,
1034 1034 int(self.ui.config("ui", "timeout", "600")),
1035 1035 releasefn, desc=desc)
1036 1036 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1037 1037 if acquirefn:
1038 1038 acquirefn()
1039 1039 return l
1040 1040
1041 1041 def _afterlock(self, callback):
1042 1042 """add a callback to the current repository lock.
1043 1043
1044 1044 The callback will be executed on lock release."""
1045 1045 l = self._lockref and self._lockref()
1046 1046 if l:
1047 1047 l.postrelease.append(callback)
1048 1048 else:
1049 1049 callback()
1050 1050
1051 1051 def lock(self, wait=True):
1052 1052 '''Lock the repository store (.hg/store) and return a weak reference
1053 1053 to the lock. Use this before modifying the store (e.g. committing or
1054 1054 stripping). If you are opening a transaction, get a lock as well.'''
1055 1055 l = self._lockref and self._lockref()
1056 1056 if l is not None and l.held:
1057 1057 l.lock()
1058 1058 return l
1059 1059
1060 1060 def unlock():
1061 1061 if hasunfilteredcache(self, '_phasecache'):
1062 1062 self._phasecache.write()
1063 1063 for k, ce in self._filecache.items():
1064 1064 if k == 'dirstate' or k not in self.__dict__:
1065 1065 continue
1066 1066 ce.refresh()
1067 1067
1068 1068 l = self._lock(self.svfs, "lock", wait, unlock,
1069 1069 self.invalidate, _('repository %s') % self.origroot)
1070 1070 self._lockref = weakref.ref(l)
1071 1071 return l
1072 1072
1073 1073 def wlock(self, wait=True):
1074 1074 '''Lock the non-store parts of the repository (everything under
1075 1075 .hg except .hg/store) and return a weak reference to the lock.
1076 1076 Use this before modifying files in .hg.'''
1077 1077 l = self._wlockref and self._wlockref()
1078 1078 if l is not None and l.held:
1079 1079 l.lock()
1080 1080 return l
1081 1081
1082 1082 def unlock():
1083 1083 self.dirstate.write()
1084 1084 self._filecache['dirstate'].refresh()
1085 1085
1086 1086 l = self._lock(self.vfs, "wlock", wait, unlock,
1087 1087 self.invalidatedirstate, _('working directory of %s') %
1088 1088 self.origroot)
1089 1089 self._wlockref = weakref.ref(l)
1090 1090 return l
1091 1091
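A usage sketch of the convention these two methods imply, mirroring rollback() above: take wlock before lock (the reverse order risks deadlock), and release through the helper imported from the lock module at the top of this file. Illustrative only:

    wlock = lock = None
    try:
        wlock = repo.wlock()   # non-store lock first
        lock = repo.lock()     # then the store lock
        # ... modify the repository ...
    finally:
        release(lock, wlock)   # release in acquisition-reverse order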
1092 1092 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1093 1093 """
1094 1094 commit an individual file as part of a larger transaction
1095 1095 """
1096 1096
1097 1097 fname = fctx.path()
1098 1098 text = fctx.data()
1099 1099 flog = self.file(fname)
1100 1100 fparent1 = manifest1.get(fname, nullid)
1101 1101 fparent2 = fparent2o = manifest2.get(fname, nullid)
1102 1102
1103 1103 meta = {}
1104 1104 copy = fctx.renamed()
1105 1105 if copy and copy[0] != fname:
1106 1106 # Mark the new revision of this file as a copy of another
1107 1107 # file. This copy data will effectively act as a parent
1108 1108 # of this new revision. If this is a merge, the first
1109 1109 # parent will be the nullid (meaning "look up the copy data")
1110 1110 # and the second one will be the other parent. For example:
1111 1111 #
1112 1112 # 0 --- 1 --- 3 rev1 changes file foo
1113 1113 # \ / rev2 renames foo to bar and changes it
1114 1114 # \- 2 -/ rev3 should have bar with all changes and
1115 1115 # should record that bar descends from
1116 1116 # bar in rev2 and foo in rev1
1117 1117 #
1118 1118 # this allows this merge to succeed:
1119 1119 #
1120 1120 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1121 1121 # \ / merging rev3 and rev4 should use bar@rev2
1122 1122 # \- 2 --- 4 as the merge base
1123 1123 #
1124 1124
1125 1125 cfname = copy[0]
1126 1126 crev = manifest1.get(cfname)
1127 1127 newfparent = fparent2
1128 1128
1129 1129 if manifest2: # branch merge
1130 1130 if fparent2 == nullid or crev is None: # copied on remote side
1131 1131 if cfname in manifest2:
1132 1132 crev = manifest2[cfname]
1133 1133 newfparent = fparent1
1134 1134
1135 1135 # find source in nearest ancestor if we've lost track
1136 1136 if not crev:
1137 1137 self.ui.debug(" %s: searching for copy revision for %s\n" %
1138 1138 (fname, cfname))
1139 1139 for ancestor in self[None].ancestors():
1140 1140 if cfname in ancestor:
1141 1141 crev = ancestor[cfname].filenode()
1142 1142 break
1143 1143
1144 1144 if crev:
1145 1145 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1146 1146 meta["copy"] = cfname
1147 1147 meta["copyrev"] = hex(crev)
1148 1148 fparent1, fparent2 = nullid, newfparent
1149 1149 else:
1150 1150 self.ui.warn(_("warning: can't find ancestor for '%s' "
1151 1151 "copied from '%s'!\n") % (fname, cfname))
1152 1152
1153 1153 elif fparent1 == nullid:
1154 1154 fparent1, fparent2 = fparent2, nullid
1155 1155 elif fparent2 != nullid:
1156 1156 # is one parent an ancestor of the other?
1157 1157 fparentancestor = flog.ancestor(fparent1, fparent2)
1158 1158 if fparentancestor == fparent1:
1159 1159 fparent1, fparent2 = fparent2, nullid
1160 1160 elif fparentancestor == fparent2:
1161 1161 fparent2 = nullid
1162 1162
1163 1163 # is the file changed?
1164 1164 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1165 1165 changelist.append(fname)
1166 1166 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1167 1167
1168 1168 # are just the flags changed during merge?
1169 1169 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1170 1170 changelist.append(fname)
1171 1171
1172 1172 return fparent1
1173 1173
1174 1174 @unfilteredmethod
1175 1175 def commit(self, text="", user=None, date=None, match=None, force=False,
1176 1176 editor=False, extra={}):
1177 1177 """Add a new revision to current repository.
1178 1178
1179 1179 Revision information is gathered from the working directory,
1180 1180 match can be used to filter the committed files. If editor is
1181 1181 supplied, it is called to get a commit message.
1182 1182 """
1183 1183
1184 1184 def fail(f, msg):
1185 1185 raise util.Abort('%s: %s' % (f, msg))
1186 1186
1187 1187 if not match:
1188 1188 match = matchmod.always(self.root, '')
1189 1189
1190 1190 if not force:
1191 1191 vdirs = []
1192 1192 match.explicitdir = vdirs.append
1193 1193 match.bad = fail
1194 1194
1195 1195 wlock = self.wlock()
1196 1196 try:
1197 1197 wctx = self[None]
1198 1198 merge = len(wctx.parents()) > 1
1199 1199
1200 1200 if (not force and merge and match and
1201 1201 (match.files() or match.anypats())):
1202 1202 raise util.Abort(_('cannot partially commit a merge '
1203 1203 '(do not specify files or patterns)'))
1204 1204
1205 1205 changes = self.status(match=match, clean=force)
1206 1206 if force:
1207 1207 changes[0].extend(changes[6]) # mq may commit unchanged files
1208 1208
1209 1209 # check subrepos
1210 1210 subs = []
1211 1211 commitsubs = set()
1212 1212 newstate = wctx.substate.copy()
1213 1213 # only manage subrepos and .hgsubstate if .hgsub is present
1214 1214 if '.hgsub' in wctx:
1215 1215 # we'll decide whether to track this ourselves, thanks
1216 1216 for c in changes[:3]:
1217 1217 if '.hgsubstate' in c:
1218 1218 c.remove('.hgsubstate')
1219 1219
1220 1220 # compare current state to last committed state
1221 1221 # build new substate based on last committed state
1222 1222 oldstate = wctx.p1().substate
1223 1223 for s in sorted(newstate.keys()):
1224 1224 if not match(s):
1225 1225 # ignore working copy, use old state if present
1226 1226 if s in oldstate:
1227 1227 newstate[s] = oldstate[s]
1228 1228 continue
1229 1229 if not force:
1230 1230 raise util.Abort(
1231 1231 _("commit with new subrepo %s excluded") % s)
1232 1232 if wctx.sub(s).dirty(True):
1233 1233 if not self.ui.configbool('ui', 'commitsubrepos'):
1234 1234 raise util.Abort(
1235 1235 _("uncommitted changes in subrepo %s") % s,
1236 1236 hint=_("use --subrepos for recursive commit"))
1237 1237 subs.append(s)
1238 1238 commitsubs.add(s)
1239 1239 else:
1240 1240 bs = wctx.sub(s).basestate()
1241 1241 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1242 1242 if oldstate.get(s, (None, None, None))[1] != bs:
1243 1243 subs.append(s)
1244 1244
1245 1245 # check for removed subrepos
1246 1246 for p in wctx.parents():
1247 1247 r = [s for s in p.substate if s not in newstate]
1248 1248 subs += [s for s in r if match(s)]
1249 1249 if subs:
1250 1250 if (not match('.hgsub') and
1251 1251 '.hgsub' in (wctx.modified() + wctx.added())):
1252 1252 raise util.Abort(
1253 1253 _("can't commit subrepos without .hgsub"))
1254 1254 changes[0].insert(0, '.hgsubstate')
1255 1255
1256 1256 elif '.hgsub' in changes[2]:
1257 1257 # clean up .hgsubstate when .hgsub is removed
1258 1258 if ('.hgsubstate' in wctx and
1259 1259 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1260 1260 changes[2].insert(0, '.hgsubstate')
1261 1261
1262 1262 # make sure all explicit patterns are matched
1263 1263 if not force and match.files():
1264 1264 matched = set(changes[0] + changes[1] + changes[2])
1265 1265
1266 1266 for f in match.files():
1267 1267 f = self.dirstate.normalize(f)
1268 1268 if f == '.' or f in matched or f in wctx.substate:
1269 1269 continue
1270 1270 if f in changes[3]: # missing
1271 1271 fail(f, _('file not found!'))
1272 1272 if f in vdirs: # visited directory
1273 1273 d = f + '/'
1274 1274 for mf in matched:
1275 1275 if mf.startswith(d):
1276 1276 break
1277 1277 else:
1278 1278 fail(f, _("no match under directory!"))
1279 1279 elif f not in self.dirstate:
1280 1280 fail(f, _("file not tracked!"))
1281 1281
1282 1282 cctx = context.workingctx(self, text, user, date, extra, changes)
1283 1283
1284 1284 if (not force and not extra.get("close") and not merge
1285 1285 and not cctx.files()
1286 1286 and wctx.branch() == wctx.p1().branch()):
1287 1287 return None
1288 1288
1289 1289 if merge and cctx.deleted():
1290 1290 raise util.Abort(_("cannot commit merge with missing files"))
1291 1291
1292 1292 ms = mergemod.mergestate(self)
1293 1293 for f in changes[0]:
1294 1294 if f in ms and ms[f] == 'u':
1295 1295 raise util.Abort(_("unresolved merge conflicts "
1296 1296 "(see hg help resolve)"))
1297 1297
1298 1298 if editor:
1299 1299 cctx._text = editor(self, cctx, subs)
1300 1300 edited = (text != cctx._text)
1301 1301
1302 1302 # Save commit message in case this transaction gets rolled back
1303 1303 # (e.g. by a pretxncommit hook). Leave the content alone on
1304 1304 # the assumption that the user will use the same editor again.
1305 1305 msgfn = self.savecommitmessage(cctx._text)
1306 1306
1307 1307 # commit subs and write new state
1308 1308 if subs:
1309 1309 for s in sorted(commitsubs):
1310 1310 sub = wctx.sub(s)
1311 1311 self.ui.status(_('committing subrepository %s\n') %
1312 1312 subrepo.subrelpath(sub))
1313 1313 sr = sub.commit(cctx._text, user, date)
1314 1314 newstate[s] = (newstate[s][0], sr)
1315 1315 subrepo.writestate(self, newstate)
1316 1316
1317 1317 p1, p2 = self.dirstate.parents()
1318 1318 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1319 1319 try:
1320 1320 self.hook("precommit", throw=True, parent1=hookp1,
1321 1321 parent2=hookp2)
1322 1322 ret = self.commitctx(cctx, True)
1323 1323 except: # re-raises
1324 1324 if edited:
1325 1325 self.ui.write(
1326 1326 _('note: commit message saved in %s\n') % msgfn)
1327 1327 raise
1328 1328
1329 1329 # update bookmarks, dirstate and mergestate
1330 1330 bookmarks.update(self, [p1, p2], ret)
1331 1331 cctx.markcommitted(ret)
1332 1332 ms.reset()
1333 1333 finally:
1334 1334 wlock.release()
1335 1335
1336 1336 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1337 1337 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1338 1338 self._afterlock(commithook)
1339 1339 return ret
1340 1340
1341 1341 @unfilteredmethod
1342 1342 def commitctx(self, ctx, error=False):
1343 1343 """Add a new revision to current repository.
1344 1344 Revision information is passed via the context argument.
1345 1345 """
1346 1346
1347 1347 tr = lock = None
1348 1348 removed = list(ctx.removed())
1349 1349 p1, p2 = ctx.p1(), ctx.p2()
1350 1350 user = ctx.user()
1351 1351
1352 1352 lock = self.lock()
1353 1353 try:
1354 1354 tr = self.transaction("commit")
1355 1355 trp = weakref.proxy(tr)
1356 1356
1357 1357 if ctx.files():
1358 1358 m1 = p1.manifest().copy()
1359 1359 m2 = p2.manifest()
1360 1360
1361 1361 # check in files
1362 1362 new = {}
1363 1363 changed = []
1364 1364 linkrev = len(self)
1365 1365 for f in sorted(ctx.modified() + ctx.added()):
1366 1366 self.ui.note(f + "\n")
1367 1367 try:
1368 1368 fctx = ctx[f]
1369 1369 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1370 1370 changed)
1371 1371 m1.set(f, fctx.flags())
1372 1372 except OSError, inst:
1373 1373 self.ui.warn(_("trouble committing %s!\n") % f)
1374 1374 raise
1375 1375 except IOError, inst:
1376 1376 errcode = getattr(inst, 'errno', errno.ENOENT)
1377 1377 if error or errcode and errcode != errno.ENOENT:
1378 1378 self.ui.warn(_("trouble committing %s!\n") % f)
1379 1379 raise
1380 1380 else:
1381 1381 removed.append(f)
1382 1382
1383 1383 # update manifest
1384 1384 m1.update(new)
1385 1385 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1386 1386 drop = [f for f in removed if f in m1]
1387 1387 for f in drop:
1388 1388 del m1[f]
1389 1389 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1390 1390 p2.manifestnode(), (new, drop))
1391 1391 files = changed + removed
1392 1392 else:
1393 1393 mn = p1.manifestnode()
1394 1394 files = []
1395 1395
1396 1396 # update changelog
1397 1397 self.changelog.delayupdate()
1398 1398 n = self.changelog.add(mn, files, ctx.description(),
1399 1399 trp, p1.node(), p2.node(),
1400 1400 user, ctx.date(), ctx.extra().copy())
1401 1401 p = lambda: self.changelog.writepending() and self.root or ""
1402 1402 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1403 1403 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1404 1404 parent2=xp2, pending=p)
1405 1405 self.changelog.finalize(trp)
1406 1406 # set the new commit in its proper phase
1407 1407 targetphase = subrepo.newcommitphase(self.ui, ctx)
1408 1408 if targetphase:
1409 1409 # retracting the boundary does not alter parent changesets.
1410 1410 # if a parent has a higher phase, the resulting phase will
1411 1411 # be compliant anyway
1412 1412 #
1413 1413 # if minimal phase was 0 we don't need to retract anything
1414 1414 phases.retractboundary(self, targetphase, [n])
1415 1415 tr.close()
1416 1416 branchmap.updatecache(self.filtered('served'))
1417 1417 return n
1418 1418 finally:
1419 1419 if tr:
1420 1420 tr.release()
1421 1421 lock.release()
1422 1422
1423 1423 @unfilteredmethod
1424 1424 def destroying(self):
1425 1425 '''Inform the repository that nodes are about to be destroyed.
1426 1426 Intended for use by strip and rollback, so there's a common
1427 1427 place for anything that has to be done before destroying history.
1428 1428
1429 1429 This is mostly useful for saving state that is in memory and waiting
1430 1430 to be flushed when the current lock is released. Because a call to
1431 1431 destroyed is imminent, the repo will be invalidated causing those
1432 1432 changes to stay in memory (waiting for the next unlock), or vanish
1433 1433 completely.
1434 1434 '''
1435 1435 # When using the same lock to commit and strip, the phasecache is left
1436 1436 # dirty after committing. Then when we strip, the repo is invalidated,
1437 1437 # causing those changes to disappear.
1438 1438 if '_phasecache' in vars(self):
1439 1439 self._phasecache.write()
1440 1440
1441 1441 @unfilteredmethod
1442 1442 def destroyed(self):
1443 1443 '''Inform the repository that nodes have been destroyed.
1444 1444 Intended for use by strip and rollback, so there's a common
1445 1445 place for anything that has to be done after destroying history.
1446 1446 '''
1447 1447 # When one tries to:
1448 1448 # 1) destroy nodes thus calling this method (e.g. strip)
1449 1449 # 2) use phasecache somewhere (e.g. commit)
1450 1450 #
1451 1451 # then 2) will fail because the phasecache contains nodes that were
1452 1452 # removed. We can either remove phasecache from the filecache,
1453 1453 # causing it to reload next time it is accessed, or simply filter
1454 1454 # the removed nodes now and write the updated cache.
1455 1455 self._phasecache.filterunknown(self)
1456 1456 self._phasecache.write()
1457 1457
1458 1458 # update the 'served' branch cache to help read only server process
1459 1459 # Thanks to branchcache collaboration this is done from the nearest
1460 1460 # filtered subset and it is expected to be fast.
1461 1461 branchmap.updatecache(self.filtered('served'))
1462 1462
1463 1463 # Ensure the persistent tag cache is updated. Doing it now
1464 1464 # means that the tag cache only has to worry about destroyed
1465 1465 # heads immediately after a strip/rollback. That in turn
1466 1466 # guarantees that "cachetip == currenttip" (comparing both rev
1467 1467 # and node) always means no nodes have been added or destroyed.
1468 1468
1469 1469 # XXX this is suboptimal when qrefresh'ing: we strip the current
1470 1470 # head, refresh the tag cache, then immediately add a new head.
1471 1471 # But I think doing it this way is necessary for the "instant
1472 1472 # tag cache retrieval" case to work.
1473 1473 self.invalidate()
1474 1474
1475 1475 def walk(self, match, node=None):
1476 1476 '''
1477 1477 walk recursively through the directory tree or a given
1478 1478 changeset, finding all files matched by the match
1479 1479 function
1480 1480 '''
1481 1481 return self[node].walk(match)
1482 1482
1483 1483 def status(self, node1='.', node2=None, match=None,
1484 1484 ignored=False, clean=False, unknown=False,
1485 1485 listsubrepos=False):
1486 1486 """return status of files between two nodes or node and working
1487 1487 directory.
1488 1488
1489 1489 If node1 is None, use the first dirstate parent instead.
1490 1490 If node2 is None, compare node1 with working directory.
1491 1491 """
1492 1492
1493 1493 def mfmatches(ctx):
1494 1494 mf = ctx.manifest().copy()
1495 1495 if match.always():
1496 1496 return mf
1497 1497 for fn in mf.keys():
1498 1498 if not match(fn):
1499 1499 del mf[fn]
1500 1500 return mf
1501 1501
1502 1502 ctx1 = self[node1]
1503 1503 ctx2 = self[node2]
1504 1504
1505 1505 working = ctx2.rev() is None
1506 1506 parentworking = working and ctx1 == self['.']
1507 1507 match = match or matchmod.always(self.root, self.getcwd())
1508 1508 listignored, listclean, listunknown = ignored, clean, unknown
1509 1509
1510 1510 # load earliest manifest first for caching reasons
1511 1511 if not working and ctx2.rev() < ctx1.rev():
1512 1512 ctx2.manifest()
1513 1513
1514 1514 if not parentworking:
1515 1515 def bad(f, msg):
1516 1516 # 'f' may be a directory pattern from 'match.files()',
1517 1517 # so 'f not in ctx1' is not enough
1518 1518 if f not in ctx1 and f not in ctx1.dirs():
1519 1519 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1520 1520 match.bad = bad
1521 1521
1522 1522 if working: # we need to scan the working dir
1523 1523 subrepos = []
1524 1524 if '.hgsub' in self.dirstate:
1525 1525 subrepos = sorted(ctx2.substate)
1526 1526 s = self.dirstate.status(match, subrepos, listignored,
1527 1527 listclean, listunknown)
1528 1528 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1529 1529
1530 1530 # check for any possibly clean files
1531 1531 if parentworking and cmp:
1532 1532 fixup = []
1533 1533 # do a full compare of any files that might have changed
1534 1534 for f in sorted(cmp):
1535 1535 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1536 1536 or ctx1[f].cmp(ctx2[f])):
1537 1537 modified.append(f)
1538 1538 else:
1539 1539 fixup.append(f)
1540 1540
1541 1541 # update dirstate for files that are actually clean
1542 1542 if fixup:
1543 1543 if listclean:
1544 1544 clean += fixup
1545 1545
1546 1546 try:
1547 1547 # updating the dirstate is optional
1548 1548 # so we don't wait on the lock
1549 1549 wlock = self.wlock(False)
1550 1550 try:
1551 1551 for f in fixup:
1552 1552 self.dirstate.normal(f)
1553 1553 finally:
1554 1554 wlock.release()
1555 1555 except error.LockError:
1556 1556 pass
1557 1557
1558 1558 if not parentworking:
1559 1559 mf1 = mfmatches(ctx1)
1560 1560 if working:
1561 1561 # we are comparing working dir against non-parent
1562 1562 # generate a pseudo-manifest for the working dir
1563 1563 mf2 = mfmatches(self['.'])
1564 1564 for f in cmp + modified + added:
1565 1565 mf2[f] = None
1566 1566 mf2.set(f, ctx2.flags(f))
1567 1567 for f in removed:
1568 1568 if f in mf2:
1569 1569 del mf2[f]
1570 1570 else:
1571 1571 # we are comparing two revisions
1572 1572 deleted, unknown, ignored = [], [], []
1573 1573 mf2 = mfmatches(ctx2)
1574 1574
1575 1575 modified, added, clean = [], [], []
1576 1576 withflags = mf1.withflags() | mf2.withflags()
1577 1577 for fn, mf2node in mf2.iteritems():
1578 1578 if fn in mf1:
1579 1579 if (fn not in deleted and
1580 1580 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1581 1581 (mf1[fn] != mf2node and
1582 1582 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1583 1583 modified.append(fn)
1584 1584 elif listclean:
1585 1585 clean.append(fn)
1586 1586 del mf1[fn]
1587 1587 elif fn not in deleted:
1588 1588 added.append(fn)
1589 1589 removed = mf1.keys()
1590 1590
1591 1591 if working and modified and not self.dirstate._checklink:
1592 1592 # Symlink placeholders may get non-symlink-like contents
1593 1593 # via user error or dereferencing by NFS or Samba servers,
1594 1594 # so we filter out any placeholders that don't look like a
1595 1595 # symlink
1596 1596 sane = []
1597 1597 for f in modified:
1598 1598 if ctx2.flags(f) == 'l':
1599 1599 d = ctx2[f].data()
1600 1600 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1601 1601 self.ui.debug('ignoring suspect symlink placeholder'
1602 1602 ' "%s"\n' % f)
1603 1603 continue
1604 1604 sane.append(f)
1605 1605 modified = sane
1606 1606
1607 1607 r = modified, added, removed, deleted, unknown, ignored, clean
1608 1608
1609 1609 if listsubrepos:
1610 1610 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1611 1611 if working:
1612 1612 rev2 = None
1613 1613 else:
1614 1614 rev2 = ctx2.substate[subpath][1]
1615 1615 try:
1616 1616 submatch = matchmod.narrowmatcher(subpath, match)
1617 1617 s = sub.status(rev2, match=submatch, ignored=listignored,
1618 1618 clean=listclean, unknown=listunknown,
1619 1619 listsubrepos=True)
1620 1620 for rfiles, sfiles in zip(r, s):
1621 1621 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1622 1622 except error.LookupError:
1623 1623 self.ui.status(_("skipping missing subrepository: %s\n")
1624 1624 % subpath)
1625 1625
1626 1626 for l in r:
1627 1627 l.sort()
1628 1628 return r
1629 1629
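The seven result lists come back in the order assembled above (modified, added, removed, deleted, unknown, ignored, clean), and the ignored/clean/unknown lists are only populated when the matching flags are passed. A minimal usage sketch, assuming an already-open repo object:

    # unpack in the order built by "r = modified, added, removed,
    # deleted, unknown, ignored, clean" above
    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(node1='.', node2=None,
                    ignored=True, clean=True, unknown=True)
    for f in modified:
        repo.ui.write('M %s\n' % f)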
1630 1630 def heads(self, start=None):
1631 1631 heads = self.changelog.heads(start)
1632 1632 # sort the output in rev descending order
1633 1633 return sorted(heads, key=self.changelog.rev, reverse=True)
1634 1634
1635 1635 def branchheads(self, branch=None, start=None, closed=False):
1636 1636 '''return a (possibly filtered) list of heads for the given branch
1637 1637
1638 1638 Heads are returned in topological order, from newest to oldest.
1639 1639 If branch is None, use the dirstate branch.
1640 1640 If start is not None, return only heads reachable from start.
1641 1641 If closed is True, return heads that are marked as closed as well.
1642 1642 '''
1643 1643 if branch is None:
1644 1644 branch = self[None].branch()
1645 1645 branches = self.branchmap()
1646 1646 if branch not in branches:
1647 1647 return []
1648 1648 # the cache returns heads ordered lowest to highest
1649 1649 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1650 1650 if start is not None:
1651 1651 # filter out the heads that cannot be reached from startrev
1652 1652 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1653 1653 bheads = [h for h in bheads if h in fbheads]
1654 1654 return bheads
1655 1655
1656 1656 def branches(self, nodes):
1657 1657 if not nodes:
1658 1658 nodes = [self.changelog.tip()]
1659 1659 b = []
1660 1660 for n in nodes:
1661 1661 t = n
1662 1662 while True:
1663 1663 p = self.changelog.parents(n)
1664 1664 if p[1] != nullid or p[0] == nullid:
1665 1665 b.append((t, n, p[0], p[1]))
1666 1666 break
1667 1667 n = p[0]
1668 1668 return b
1669 1669
1670 1670 def between(self, pairs):
1671 1671 r = []
1672 1672
1673 1673 for top, bottom in pairs:
1674 1674 n, l, i = top, [], 0
1675 1675 f = 1
1676 1676
1677 1677 while n != bottom and n != nullid:
1678 1678 p = self.changelog.parents(n)[0]
1679 1679 if i == f:
1680 1680 l.append(n)
1681 1681 f = f * 2
1682 1682 n = p
1683 1683 i += 1
1684 1684
1685 1685 r.append(l)
1686 1686
1687 1687 return r
1688 1688
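between() samples each first-parent chain at exponentially growing distances from the top node, which keeps the answer logarithmic in the chain length. A worked trace under an assumed linear history c0 -> c1 -> ... -> c9 (each node's first parent is its predecessor):

    # assumed: pairs = [(c9, c0)], walking first parents only;
    # a sample is taken whenever i == f, then f doubles:
    #   i=1, f=1 -> append c8  (distance 1 from c9)
    #   i=2, f=2 -> append c7  (distance 2)
    #   i=4, f=4 -> append c5  (distance 4)
    #   i=8, f=8 -> append c1  (distance 8)
    # result: repo.between([(c9, c0)]) == [[c8, c7, c5, c1]]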
1689 1689 def pull(self, remote, heads=None, force=False):
1690 1690 return exchange.pull(self, remote, heads, force)
1691 1691
1692 1692 def checkpush(self, pushop):
1693 1693 """Extensions can override this function if additional checks have
1694 1694 to be performed before pushing, or call it if they override push
1695 1695 command.
1696 1696 """
1697 1697 pass
1698 1698
1699 1699 def push(self, remote, force=False, revs=None, newbranch=False):
1700 1700 return exchange.push(self, remote, force, revs, newbranch)
1701 1701
1702 1702 def stream_in(self, remote, requirements):
1703 1703 lock = self.lock()
1704 1704 try:
1705 1705 # Save remote branchmap. We will use it later
1706 1706 # to speed up branchcache creation
1707 1707 rbranchmap = None
1708 1708 if remote.capable("branchmap"):
1709 1709 rbranchmap = remote.branchmap()
1710 1710
1711 1711 fp = remote.stream_out()
1712 1712 l = fp.readline()
1713 1713 try:
1714 1714 resp = int(l)
1715 1715 except ValueError:
1716 1716 raise error.ResponseError(
1717 1717 _('unexpected response from remote server:'), l)
1718 1718 if resp == 1:
1719 1719 raise util.Abort(_('operation forbidden by server'))
1720 1720 elif resp == 2:
1721 1721 raise util.Abort(_('locking the remote repository failed'))
1722 1722 elif resp != 0:
1723 1723 raise util.Abort(_('the server sent an unknown error code'))
1724 1724 self.ui.status(_('streaming all changes\n'))
1725 1725 l = fp.readline()
1726 1726 try:
1727 1727 total_files, total_bytes = map(int, l.split(' ', 1))
1728 1728 except (ValueError, TypeError):
1729 1729 raise error.ResponseError(
1730 1730 _('unexpected response from remote server:'), l)
1731 1731 self.ui.status(_('%d files to transfer, %s of data\n') %
1732 1732 (total_files, util.bytecount(total_bytes)))
1733 1733 handled_bytes = 0
1734 1734 self.ui.progress(_('clone'), 0, total=total_bytes)
1735 1735 start = time.time()
1736 1736
1737 1737 tr = self.transaction(_('clone'))
1738 1738 try:
1739 1739 for i in xrange(total_files):
1740 1740 # XXX doesn't support '\n' or '\r' in filenames
1741 1741 l = fp.readline()
1742 1742 try:
1743 1743 name, size = l.split('\0', 1)
1744 1744 size = int(size)
1745 1745 except (ValueError, TypeError):
1746 1746 raise error.ResponseError(
1747 1747 _('unexpected response from remote server:'), l)
1748 1748 if self.ui.debugflag:
1749 1749 self.ui.debug('adding %s (%s)\n' %
1750 1750 (name, util.bytecount(size)))
1751 1751 # for backwards compat, name was partially encoded
1752 1752 ofp = self.sopener(store.decodedir(name), 'w')
1753 1753 for chunk in util.filechunkiter(fp, limit=size):
1754 1754 handled_bytes += len(chunk)
1755 1755 self.ui.progress(_('clone'), handled_bytes,
1756 1756 total=total_bytes)
1757 1757 ofp.write(chunk)
1758 1758 ofp.close()
1759 1759 tr.close()
1760 1760 finally:
1761 1761 tr.release()
1762 1762
1763 1763 # Writing straight to files circumvented the in-memory caches
1764 1764 self.invalidate()
1765 1765
1766 1766 elapsed = time.time() - start
1767 1767 if elapsed <= 0:
1768 1768 elapsed = 0.001
1769 1769 self.ui.progress(_('clone'), None)
1770 1770 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1771 1771 (util.bytecount(total_bytes), elapsed,
1772 1772 util.bytecount(total_bytes / elapsed)))
1773 1773
1774 1774 # new requirements = old non-format requirements +
1775 1775 # new format-related
1776 1776 # requirements from the streamed-in repository
1777 1777 requirements.update(set(self.requirements) - self.supportedformats)
1778 1778 self._applyrequirements(requirements)
1779 1779 self._writerequirements()
1780 1780
1781 1781 if rbranchmap:
1782 1782 rbheads = []
1783 1783 for bheads in rbranchmap.itervalues():
1784 1784 rbheads.extend(bheads)
1785 1785
1786 1786 if rbheads:
1787 1787 rtiprev = max((int(self.changelog.rev(node))
1788 1788 for node in rbheads))
1789 1789 cache = branchmap.branchcache(rbranchmap,
1790 1790 self[rtiprev].node(),
1791 1791 rtiprev)
1792 1792 # Try to stick it as low as possible
1793 1793 # filters above 'served' are unlikely to be fetched from a clone
1794 1794 for candidate in ('base', 'immutable', 'served'):
1795 1795 rview = self.filtered(candidate)
1796 1796 if cache.validfor(rview):
1797 1797 self._branchcaches[candidate] = cache
1798 1798 cache.write(rview)
1799 1799 break
1800 1800 self.invalidate()
1801 1801 return len(self.heads()) + 1
1802 1802 finally:
1803 1803 lock.release()
1804 1804
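For reference, the stream that stream_in() consumes has the shape implied by the parsing above (an informal reconstruction from this code, not a normative protocol spec):

    # "0\n"                            status: 0 ok, 1 forbidden, 2 lock failed
    # "<total_files> <total_bytes>\n"  both ASCII decimal
    # then, for each file:
    #   "<store name>\0<size>\n"       name partially encoded (store.decodedir)
    #   <size> raw bytes of revlog data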
1805 1805 def clone(self, remote, heads=None, stream=False):
1806 1806 '''clone remote repository.
1807 1807
1808 1808 keyword arguments:
1809 1809 heads: list of revs to clone (forces use of pull)
1810 1810 stream: use streaming clone if possible'''
1811 1811
1812 1812 # now, all clients that can request uncompressed clones can
1813 1813 # read repo formats supported by all servers that can serve
1814 1814 # them.
1815 1815
1816 1816 # if revlog format changes, client will have to check version
1817 1817 # and format flags on "stream" capability, and use
1818 1818 # uncompressed only if compatible.
1819 1819
1820 1820 if not stream:
1821 1821 # if the server explicitly prefers to stream (for fast LANs)
1822 1822 stream = remote.capable('stream-preferred')
1823 1823
1824 1824 if stream and not heads:
1825 1825 # 'stream' means remote revlog format is revlogv1 only
1826 1826 if remote.capable('stream'):
1827 1827 return self.stream_in(remote, set(('revlogv1',)))
1828 1828 # otherwise, 'streamreqs' contains the remote revlog format
1829 1829 streamreqs = remote.capable('streamreqs')
1830 1830 if streamreqs:
1831 1831 streamreqs = set(streamreqs.split(','))
1832 1832 # if we support it, stream in and adjust our requirements
1833 1833 if not streamreqs - self.supportedformats:
1834 1834 return self.stream_in(remote, streamreqs)
1835 1835 return self.pull(remote, heads)
1836 1836
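The negotiation above tries the cheapest applicable transport first; in outline:

    # 1. no explicit stream request: honor the server's
    #    'stream-preferred' hint
    # 2. 'stream' capability        -> stream_in with requirements
    #    {'revlogv1'}
    # 3. 'streamreqs=fmt1,fmt2,...' -> stream_in only if every listed
    #    format is locally supported
    # 4. otherwise, or when specific heads are wanted: fall back to pull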
1837 1837 def pushkey(self, namespace, key, old, new):
1838 1838 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1839 1839 old=old, new=new)
1840 1840 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1841 1841 ret = pushkey.push(self, namespace, key, old, new)
1842 1842 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1843 1843 ret=ret)
1844 1844 return ret
1845 1845
1846 1846 def listkeys(self, namespace):
1847 1847 self.hook('prelistkeys', throw=True, namespace=namespace)
1848 1848 self.ui.debug('listing keys for "%s"\n' % namespace)
1849 1849 values = pushkey.list(self, namespace)
1850 1850 self.hook('listkeys', namespace=namespace, values=values)
1851 1851 return values
1852 1852
1853 1853 def debugwireargs(self, one, two, three=None, four=None, five=None):
1854 1854 '''used to test argument passing over the wire'''
1855 1855 return "%s %s %s %s %s" % (one, two, three, four, five)
1856 1856
1857 1857 def savecommitmessage(self, text):
1858 1858 fp = self.opener('last-message.txt', 'wb')
1859 1859 try:
1860 1860 fp.write(text)
1861 1861 finally:
1862 1862 fp.close()
1863 1863 return self.pathto(fp.name[len(self.root) + 1:])
1864 1864
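A worked example, assuming a repository rooted at /repo: the message is written through the repo opener to /repo/.hg/last-message.txt, and the return value is that name made relative via pathto(), typically '.hg/last-message.txt':

    # hypothetical caller; the saved file lets tools recover the
    # message from an aborted commit
    msgfile = repo.savecommitmessage('WIP: draft commit message')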
1865 1865 # used to avoid circular references so destructors work
1866 1866 def aftertrans(files):
1867 1867 renamefiles = [tuple(t) for t in files]
1868 1868 def a():
1869 1869 for vfs, src, dest in renamefiles:
1870 1870 try:
1871 1871 vfs.rename(src, dest)
1872 1872 except OSError: # journal file does not yet exist
1873 1873 pass
1874 1874 return a
1875 1875
1876 1876 def undoname(fn):
1877 1877 base, name = os.path.split(fn)
1878 1878 assert name.startswith('journal')
1879 1879 return os.path.join(base, name.replace('journal', 'undo', 1))
1880 1880
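undoname() rewrites only the first 'journal' in the basename, and the assert above guarantees the basename starts with it; for example:

    # undoname('/repo/.hg/journal.dirstate') == '/repo/.hg/undo.dirstate'
    # undoname('journal') == 'undo'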
1881 1881 def instance(ui, path, create):
1882 1882 return localrepository(ui, util.urllocalpath(path), create)
1883 1883
1884 1884 def islocal(path):
1885 1885 return True
@@ -1,179 +1,180 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from mercurial import changegroup
10 10 from mercurial.node import short
11 11 from mercurial.i18n import _
12 12 import os
13 13 import errno
14 14
15 15 def _bundle(repo, bases, heads, node, suffix, compress=True):
16 16 """create a bundle with the specified revisions as a backup"""
17 17 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip')
18 18 backupdir = repo.join("strip-backup")
19 19 if not os.path.isdir(backupdir):
20 20 os.mkdir(backupdir)
21 21 name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
22 22 if compress:
23 23 bundletype = "HG10BZ"
24 24 else:
25 25 bundletype = "HG10UN"
26 26 return changegroup.writebundle(cg, name, bundletype)
27 27
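The bundle name is derived from the short hash of the strip point; for instance, with short(node) == '1234abcdef12' (a hypothetical hash) and the default topic suffix, the file lands at:

    # <repo>/.hg/strip-backup/1234abcdef12-backup.hg
    # compressed as HG10BZ by default, HG10UN when compress=False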
28 28 def _collectfiles(repo, striprev):
29 29 """find out the filelogs affected by the strip"""
30 30 files = set()
31 31
32 32 for x in xrange(striprev, len(repo)):
33 33 files.update(repo[x].files())
34 34
35 35 return sorted(files)
36 36
37 37 def _collectbrokencsets(repo, files, striprev):
38 38 """return the changesets which will be broken by the truncation"""
39 39 s = set()
40 40 def collectone(revlog):
41 41 _, brokenset = revlog.getstrippoint(striprev)
42 42 s.update([revlog.linkrev(r) for r in brokenset])
43 43
44 44 collectone(repo.manifest)
45 45 for fname in files:
46 46 collectone(repo.file(fname))
47 47
48 48 return s
49 49
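"Broken" here means changesets that survive the changelog strip but whose manifest or filelog revisions sit above the revlog truncation point, so their data would be lost unless it is bundled and re-applied; a sketch of one collectone() step, under that reading of getstrippoint():

    # strippoint, brokenset = filelog.getstrippoint(striprev)
    # brokenset: filelog revs that get truncated although their linked
    # changeset is kept; record those changesets' revs in s so strip()
    # saves them in the temporary bundle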
50 50 def strip(ui, repo, nodelist, backup="all", topic='backup'):
51 51 repo = repo.unfiltered()
52 52 repo.destroying()
53 53
54 54 cl = repo.changelog
55 55 # TODO handle undo of merge sets
56 56 if isinstance(nodelist, str):
57 57 nodelist = [nodelist]
58 58 striplist = [cl.rev(node) for node in nodelist]
59 59 striprev = min(striplist)
60 60
61 61 keeppartialbundle = backup == 'strip'
62 62
63 63 # Some revisions with rev > striprev may not be descendants of striprev.
64 64 # We have to find these revisions and put them in a bundle, so that
65 65 # we can restore them after the truncations.
66 66 # To create the bundle we use repo.changegroupsubset which requires
67 67 # the list of heads and bases of the set of interesting revisions.
68 68 # (head = revision in the set that has no descendant in the set;
69 69 # base = revision in the set that has no ancestor in the set)
70 70 tostrip = set(striplist)
71 71 for rev in striplist:
72 72 for desc in cl.descendants([rev]):
73 73 tostrip.add(desc)
74 74
75 75 files = _collectfiles(repo, striprev)
76 76 saverevs = _collectbrokencsets(repo, files, striprev)
77 77
78 78 # compute heads
79 79 saveheads = set(saverevs)
80 80 for r in xrange(striprev + 1, len(cl)):
81 81 if r not in tostrip:
82 82 saverevs.add(r)
83 83 saveheads.difference_update(cl.parentrevs(r))
84 84 saveheads.add(r)
85 85 saveheads = [cl.node(r) for r in saveheads]
86 86
87 87 # compute base nodes
88 88 if saverevs:
89 89 descendants = set(cl.descendants(saverevs))
90 90 saverevs.difference_update(descendants)
91 91 savebases = [cl.node(r) for r in saverevs]
92 92 stripbases = [cl.node(r) for r in tostrip]
93 93
94 94 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
95 95 # is much faster
96 96 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
97 97 if newbmtarget:
98 98 newbmtarget = repo[newbmtarget[0]].node()
99 99 else:
100 100 newbmtarget = '.'
101 101
102 102 bm = repo._bookmarks
103 103 updatebm = []
104 104 for m in bm:
105 105 rev = repo[bm[m]].rev()
106 106 if rev in tostrip:
107 107 updatebm.append(m)
108 108
109 109 # create a changegroup for all the branches we need to keep
110 110 backupfile = None
111 111 if backup == "all":
112 112 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
113 113 repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
114 114 repo.ui.log("backupbundle", "saved backup bundle to %s\n", backupfile)
115 115 if saveheads or savebases:
116 116 # do not compress partial bundle if we remove it from disk later
117 117 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
118 118 compress=keeppartialbundle)
119 119
120 120 mfst = repo.manifest
121 121
122 122 tr = repo.transaction("strip")
123 123 offset = len(tr.entries)
124 124
125 125 try:
126 126 tr.startgroup()
127 127 cl.strip(striprev, tr)
128 128 mfst.strip(striprev, tr)
129 129 for fn in files:
130 130 repo.file(fn).strip(striprev, tr)
131 131 tr.endgroup()
132 132
133 133 try:
134 134 for i in xrange(offset, len(tr.entries)):
135 135 file, troffset, ignore = tr.entries[i]
136 136 repo.sopener(file, 'a').truncate(troffset)
137 137 if troffset == 0:
138 138 repo.store.markremoved(file)
139 139 tr.close()
140 140 except: # re-raises
141 141 tr.abort()
142 142 raise
143 143
144 144 if saveheads or savebases:
145 145 ui.note(_("adding branch\n"))
146 146 f = open(chgrpfile, "rb")
147 147 gen = changegroup.readbundle(f, chgrpfile)
148 148 if not repo.ui.verbose:
149 149 # silence internal shuffling chatter
150 150 repo.ui.pushbuffer()
151 151 changegroup.addchangegroup(repo, gen, 'strip',
152 152 'bundle:' + chgrpfile, True)
153 153 if not repo.ui.verbose:
154 154 repo.ui.popbuffer()
155 155 f.close()
156 156 if not keeppartialbundle:
157 157 os.unlink(chgrpfile)
158 158
159 159 # remove undo files
160 for undofile in repo.undofiles():
160 for undovfs, undofile in repo.undofiles():
161 161 try:
162 os.unlink(undofile)
162 undovfs.unlink(undofile)
163 163 except OSError, e:
164 164 if e.errno != errno.ENOENT:
165 ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
165 ui.warn(_('error removing %s: %s\n') %
166 (undovfs.join(undofile), str(e)))
166 167
167 168 for m in updatebm:
168 169 bm[m] = repo[newbmtarget].node()
169 170 bm.write()
170 171 except: # re-raises
171 172 if backupfile:
172 173 ui.warn(_("strip failed, full bundle stored in '%s'\n")
173 174 % backupfile)
174 175 elif saveheads:
175 176 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
176 177 % chgrpfile)
177 178 raise
178 179
179 180 repo.destroyed()
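The repair.py hunk above is the consuming side of this changeset: strip() now unlinks each undo file through the vfs it belongs to instead of joining absolute paths itself. A minimal sketch of the producing side on localrepository, assuming a _journalfiles() helper that pairs each journal file with its vfs (names inferred from the commit message, not quoted from the patch):

    def undofiles(self):
        # pair every undo file with the vfs that owns it, so callers
        # like repair.strip() can do undovfs.unlink(undofile) and
        # undovfs.join(undofile) without rebuilding paths by hand
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]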