# Scraped changeset-viewer header (not part of localrepo.py source):
# commit: catch changed exec bit on files from p1 (issue4382)
# author: Matt Mackall
# revision: r22492:d5261db0 (stable branch)
# diff hunk shown below: @@ -1,1781 +1,1780
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # Every accessor redirects to the unfiltered repo so that all filtered
    # views share a single cached value keyed on the underlying repository.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve fname relative to .hg/store rather than .hg/
        return obj.sjoin(fname)
39 39
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # accessed on the unfiltered repo itself: compute and cache here
            return super(unfilteredpropertycache, self).__get__(unfi)
        # accessed through a filtered view: delegate to the unfiltered repo
        # so the value is computed/cached exactly once
        return getattr(unfi, self.name)
48 48
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store directly on the (possibly filtered) instance, bypassing any
        # attribute redirection the repo proxy classes might perform
        object.__setattr__(obj, self.name, value)
54 54
55 55
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
59 59
def unfilteredmethod(orig):
    """Decorator: always run *orig* against the unfiltered repository."""
    def inner(repo, *args, **kwargs):
        # swap the (possibly filtered) repo for its unfiltered counterpart
        return orig(repo.unfiltered(), *args, **kwargs)
    return inner
65 65
# capabilities advertised by localpeer (current API only); legacy peers also
# expose the pre-getbundle 'changegroupsubset' capability
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # peers always talk to the 'served' filtered view, never the raw repo
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer exposes its underlying repository object
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2X' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            # translate internal race detection into a wire-style error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    # pre-getbundle wire methods, kept only for capability-restricted tests
    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
    # on-disk formats this class can read/write; _basesupported additionally
    # covers store-layout requirements that do not affect revlog parsing
    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that are forwarded to the store opener as revlog options
    openerreqs = set(('revlogv1', 'generaldelta'))
    # default requirements written into newly created repositories
    requirements = ['revlogv1']
    # None means "unfiltered"; repoview subclasses override this
    filtername = None

    bundle2caps = {'HG2X': (),
                   'b2x:listkeys': (),
                   'b2x:pushkey': ()}

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # copy so callers may append without mutating the class attribute
        return self.requirements[:]
193 193
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at *path*.

        Raises error.RepoError if the repository is missing (without create)
        or already exists (with create).
        """
        # working-directory vfs rooted at the checkout, .hg vfs beneath it
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # per-repo configuration and extensions; a missing hgrc is fine
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run hooks registered by currently enabled extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # pre-requirements-era repository: treat as empty set
                requirements = set()

        self.sharedpath = self.path
        try:
            # honour .hg/sharedpath redirection for shared repositories
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
303 303
    def close(self):
        # nothing to release for a plain local repository
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(self.bundle2caps)
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps

    def _applyrequirements(self, requirements):
        # remember the requirement set and surface the revlog-relevant subset
        # (plus chunk cache sizing) as store-opener options
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.sopener.options['chunkcachesize'] = chunkcachesize

    def _writerequirements(self):
        # persist requirements to .hg/requires, one per line, sorted
        reqfile = self.opener("requires", "w")
        for r in sorted(self.requirements):
            reqfile.write("%s\n" % r)
        reqfile.close()
329 329
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            # walk prefixes from longest to shortest, looking for a subrepo
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path lies inside a subrepo: let the subrepo decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False
367 367
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark store; invalidated when .hg/bookmarks changes on disk
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark, from .hg/bookmarks.current
        return bookmarks.readcurrent(self)
392 392
393 393 def bookmarkheads(self, bookmark):
394 394 name = bookmark.split('@', 1)[0]
395 395 heads = []
396 396 for mark, n in self._bookmarks.iteritems():
397 397 if mark.split('@', 1)[0] == name:
398 398 heads.append(n)
399 399 return heads
400 400
    @storecache('phaseroots')
    def _phasecache(self):
        # phase information, invalidated when .hg/store/phaseroots changes
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a transaction is in progress in this repo: also read the
            # not-yet-committed revisions from 00changelog.i.a
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @repofilecache('dirstate')
    def dirstate(self):
        # warn (once) about working-directory parents missing from the
        # changelog instead of crashing; fall back to nullid
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
442 442
    def __getitem__(self, changeid):
        # repo[None] is the working directory context
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
488 488
489 489 @unfilteredmethod
490 490 def _tag(self, names, node, message, local, user, date, extra={},
491 491 editor=False):
492 492 if isinstance(names, str):
493 493 names = (names,)
494 494
495 495 branches = self.branchmap()
496 496 for name in names:
497 497 self.hook('pretag', throw=True, node=hex(node), tag=name,
498 498 local=local)
499 499 if name in branches:
500 500 self.ui.warn(_("warning: tag %s conflicts with existing"
501 501 " branch name\n") % name)
502 502
503 503 def writetags(fp, names, munge, prevtags):
504 504 fp.seek(0, 2)
505 505 if prevtags and prevtags[-1] != '\n':
506 506 fp.write('\n')
507 507 for name in names:
508 508 m = munge and munge(name) or name
509 509 if (self._tagscache.tagtypes and
510 510 name in self._tagscache.tagtypes):
511 511 old = self.tags().get(name, nullid)
512 512 fp.write('%s %s\n' % (hex(old), m))
513 513 fp.write('%s %s\n' % (hex(node), m))
514 514 fp.close()
515 515
516 516 prevtags = ''
517 517 if local:
518 518 try:
519 519 fp = self.opener('localtags', 'r+')
520 520 except IOError:
521 521 fp = self.opener('localtags', 'a')
522 522 else:
523 523 prevtags = fp.read()
524 524
525 525 # local tags are stored in the current charset
526 526 writetags(fp, names, None, prevtags)
527 527 for name in names:
528 528 self.hook('tag', node=hex(node), tag=name, local=local)
529 529 return
530 530
531 531 try:
532 532 fp = self.wfile('.hgtags', 'rb+')
533 533 except IOError, e:
534 534 if e.errno != errno.ENOENT:
535 535 raise
536 536 fp = self.wfile('.hgtags', 'ab')
537 537 else:
538 538 prevtags = fp.read()
539 539
540 540 # committed tags are stored in UTF-8
541 541 writetags(fp, names, encoding.fromlocal, prevtags)
542 542
543 543 fp.close()
544 544
545 545 self.invalidatecaches()
546 546
547 547 if '.hgtags' not in self.dirstate:
548 548 self[None].add(['.hgtags'])
549 549
550 550 m = matchmod.exact(self.root, '', ['.hgtags'])
551 551 tagnode = self.commit(message, user, date, extra=extra, match=m,
552 552 editor=editor)
553 553
554 554 for name in names:
555 555 self.hook('tag', node=hex(node), tag=name, local=local)
556 556
557 557 return tagnode
558 558
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to commit a tag while .hgtags itself has pending
            # working-copy changes (appears in one of the status lists)
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
588 588
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # lazily filled by nodetags() and tagslist() respectively
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
611 611
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # on a filtered repo the shared cache may reference hidden
            # changesets, so recompute rather than trust _tagscache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
627 627
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
660 660
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
671 671
672 672 def tagslist(self):
673 673 '''return a list of tags ordered by revision'''
674 674 if not self._tagscache.tagslist:
675 675 l = []
676 676 for t, n in self.tags().iteritems():
677 677 r = self.changelog.rev(n)
678 678 l.append((r, t, n))
679 679 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
680 680
681 681 return self._tagscache.tagslist
682 682
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # invert the tag -> node mapping once and cache it
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
693 693
694 694 def nodebookmarks(self, node):
695 695 marks = []
696 696 for bookmark, n in self._bookmarks.iteritems():
697 697 if n == node:
698 698 marks.append(bookmark)
699 699 return sorted(marks)
700 700
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # updatecache populates self._branchcaches[self.filtername]
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)

    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        # a key naming an existing branch resolves to itself
        repo = remote or self
        if key in repo.branchmap():
            return key

        # otherwise resolve the key to a changeset and use its branch,
        # preferring the remote's local repo when available
        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()
724 724
725 725 def known(self, nodes):
726 726 nm = self.changelog.nodemap
727 727 pc = self._phasecache
728 728 result = []
729 729 for n in nodes:
730 730 r = nm.get(n)
731 731 resp = not (r is None or pc.phase(self, r) >= phases.secret)
732 732 result.append(resp)
733 733 return result
734 734
    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def join(self, f):
        # path of f inside .hg/
        return os.path.join(self.path, f)

    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)

    def file(self, f):
        # filelog for the tracked file f (leading '/' is tolerated)
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]
760 760
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                # drop copy records whose endpoints no longer exist in p1
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open f in the working directory
        return self.wopener(f, mode)

    def _link(self, f):
        # is working-directory file f a symlink?
        return self.wvfs.islink(f)
796 796
797 797 def _loadfilter(self, filter):
798 798 if filter not in self.filterpats:
799 799 l = []
800 800 for pat, cmd in self.ui.configitems(filter):
801 801 if cmd == '!':
802 802 continue
803 803 mf = matchmod.match(self.root, '', [pat])
804 804 fn = None
805 805 params = cmd
806 806 for name, filterfn in self._datafilters.iteritems():
807 807 if cmd.startswith(name):
808 808 fn = filterfn
809 809 params = cmd[len(name):].lstrip()
810 810 break
811 811 if not fn:
812 812 fn = lambda s, c, **kwargs: util.filter(s, c)
813 813 # Wrap old filters not supporting keyword arguments
814 814 if not inspect.getargspec(fn)[2]:
815 815 oldfn = fn
816 816 fn = lambda s, c, **kwargs: oldfn(s, c)
817 817 l.append((mf, fn, params))
818 818 self.filterpats[filter] = l
819 819 return self.filterpats[filter]
820 820
    def _filter(self, filterpats, filename, data):
        # run data through the first filter whose pattern matches filename
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        # register a named data filter usable from [encode]/[decode]
        self._datafilters[name] = filter
840 840
    def wread(self, filename):
        # read a working-directory file (symlink target for links) and run
        # it through the encode filters
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        # write data (after decode filters) into the working directory,
        # honouring the symlink ('l') and exec ('x') flags
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        # apply decode filters without touching the filesystem
        return self._filter(self._decodefilterpats, filename, data)
859 859
    def transaction(self, desc, report=None):
        # nest inside an already-running transaction when there is one
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        def onclose():
            # flush pending store writes when the transaction closes
            self.store.write(self._transref())

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        tr = transaction.transaction(rp, self.sopener,
                                     "journal",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     onclose)
        self._transref = weakref.ref(tr)
        return tr

    def _journalfiles(self):
        # (vfs, name) pairs of every file backed up for rollback
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        # snapshot mutable state alongside the transaction journal so a
        # later rollback can restore it
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
907 907
    def recover(self):
        """Roll back an interrupted transaction left in the store journal.

        Returns True if a journal was found and rolled back, False
        otherwise."""
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                # on-disk state changed behind the caches' back
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
    def rollback(self, dryrun=False, force=False):
        """Undo the last completed transaction if 'undo' data exists.

        Returns the result of _rollback (0 on success) or 1 when there is
        no rollback information."""
        wlock = lock = None
        try:
            # both locks: rollback touches store and working-dir state
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Perform the actual rollback from the undo.* files.

        Refuses (unless *force*) to roll back a commit when the working
        directory is not checked out at tip, since that may lose data.
        Returns 0.  Caller must hold both wlock and lock.
        """
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # undo.desc missing: older journal format or partial undo data
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore dirstate/branch if the dirstate parents were
        # stripped by the rollback
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
    def invalidatecaches(self):
        """Drop in-memory caches derived from repository history
        (tags cache, branch caches, volatile revsets)."""
        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
    def invalidatevolatilesets(self):
        # filtered-revision and obsolescence caches depend on volatile
        # repository state, so drop both together
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
    def invalidate(self):
        """Drop all filecached properties (except dirstate) plus the
        derived history and store caches, forcing a reread from disk."""
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire *lockname* via *vfs* and return the lock object.

        A non-blocking attempt is made first; if the lock is held and
        *wait* is true, retry with the configured ui.timeout (default
        600s) and warn the user who holds it.  *acquirefn*, if given, is
        called once the lock is held; *releasefn* runs on release.
        """
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release.  If no lock is
        currently held, the callback runs immediately."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # reentrant: bump the hold count on the existing lock
            l.lock()
            return l

        def unlock():
            # flush the phase cache and refresh file cache entries that
            # may have been written while the lock was held
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # reentrant: bump the hold count on the existing lock
            l.lock()
            return l

        def unlock():
            # persist any pending dirstate changes on release
            self.dirstate.write()
            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node (or the reused parent node when the
        file is unchanged).  Files that changed content, copy metadata or
        flags are appended to *changelist*.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing to
        commit.
        """
        # NOTE(review): 'extra' has a mutable default; it appears to be
        # read-only here (only .get() is called), but confirm before
        # relying on that.

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # track visited directories so explicit-pattern checking below
            # can distinguish directories from missing files
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in changes[:3]:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            # nothing to commit and no forced/closing/merge commit
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        # run the commit hook once the lock is released
        self._afterlock(commithook)
        return ret
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is true, IOErrors on individual files abort the
        commit instead of being treated as removals.  Returns the new
        changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # missing file: treat as a removal
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the result will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)
    def heads(self, start=None):
        """Return repository heads (optionally only those reachable from
        *start*), in descending revision order."""
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)
    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads
    def branches(self, nodes):
        """For each node, walk first parents until a merge or a root is
        reached; return a list of (tip, node, p1, p2) branch tuples.

        Defaults to the changelog tip when *nodes* is empty."""
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                # stop at a merge (two parents) or at a root changeset
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1559 1558 def between(self, pairs):
1560 1559 r = []
1561 1560
1562 1561 for top, bottom in pairs:
1563 1562 n, l, i = top, [], 0
1564 1563 f = 1
1565 1564
1566 1565 while n != bottom and n != nullid:
1567 1566 p = self.changelog.parents(n)[0]
1568 1567 if i == f:
1569 1568 l.append(n)
1570 1569 f = f * 2
1571 1570 n = p
1572 1571 i += 1
1573 1572
1574 1573 r.append(l)
1575 1574
1576 1575 return r
1577 1576
1578 1577 def pull(self, remote, heads=None, force=False):
1579 1578 return exchange.pull (self, remote, heads, force)
1580 1579
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()
    def push(self, remote, force=False, revs=None, newbranch=False):
        # thin wrapper around exchange.push
        return exchange.push(self, remote, force, revs, newbranch)
    def stream_in(self, remote, requirements):
        """Clone by streaming raw store files from *remote*.

        Writes the streamed files straight into the store under a
        transaction, updates the repo requirements from *requirements*
        plus the remote's format requirements, and seeds the branch
        cache from the remote branchmap when available.  Returns
        len(heads()) + 1.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''
        # NOTE(review): 'heads' uses a mutable default; it is never
        # mutated here, but confirm before relying on that.

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)
    def pushkey(self, namespace, key, old, new):
        """Update *key* in *namespace* from *old* to *new*, running the
        prepushkey (may veto) and pushkey hooks around the change."""
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret
    def listkeys(self, namespace):
        """Return the pushkey values of *namespace*, running the
        prelistkeys (may veto) and listkeys hooks around the lookup."""
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
1749 1748 def debugwireargs(self, one, two, three=None, four=None, five=None):
1750 1749 '''used to test argument passing over the wire'''
1751 1750 return "%s %s %s %s %s" % (one, two, three, four, five)
1752 1751
    def savecommitmessage(self, text):
        """Persist *text* to .hg/last-message.txt (for recovery after a
        rolled-back commit) and return its repo-relative path."""
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (vfs, src, dest)
    renames, silently skipping sources that were never created."""
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
def undoname(fn):
    """Map a journal file path to the matching 'undo' file path."""
    directory, base = os.path.split(fn)
    prefix = 'journal'
    assert base.startswith(prefix)
    return os.path.join(directory, 'undo' + base[len(prefix):])
def instance(ui, path, create):
    # factory used by hg.repository(); strips any url scheme from path
    return localrepository(ui, util.urllocalpath(path), create)
def islocal(path):
    """Part of the repo-type interface: local repositories are always
    local, whatever *path* is."""
    return True
@@ -1,55 +1,57
1 1 b51a8138292a introduced a regression where we would mention in the
2 2 changelog executable files added by the second parent of a merge. Test
3 3 that that doesn't happen anymore
4 4
5 5 $ "$TESTDIR/hghave" execbit || exit 80
6 6
7 7 $ hg init repo
8 8 $ cd repo
9 9 $ echo foo > foo
10 10 $ hg ci -qAm 'add foo'
11 11
12 12 $ echo bar > bar
13 13 $ chmod +x bar
14 14 $ hg ci -qAm 'add bar'
15 15
16 16 manifest of p2:
17 17
18 18 $ hg manifest
19 19 bar
20 20 foo
21 21
22 22 $ hg up -qC 0
23 23 $ echo >> foo
24 24 $ hg ci -m 'change foo'
25 25 created new head
26 26
27 27 manifest of p1:
28 28
29 29 $ hg manifest
30 30 foo
31 31
32 32 $ hg merge
33 33 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
34 34 (branch merge, don't forget to commit)
35 $ chmod +x foo
35 36 $ hg ci -m 'merge'
36 37
37 this should not mention bar:
38 this should not mention bar but should mention foo:
38 39
39 40 $ hg tip -v
40 changeset: 3:ef2fc9b4a51b
41 changeset: 3:c53d17ff3380
41 42 tag: tip
42 43 parent: 2:ed1b79f46b9a
43 44 parent: 1:d394a8db219b
44 45 user: test
45 46 date: Thu Jan 01 00:00:00 1970 +0000
47 files: foo
46 48 description:
47 49 merge
48 50
49 51
50 52
51 53 $ hg debugindex bar
52 54 rev offset length ..... linkrev nodeid p1 p2 (re)
53 55 0 0 5 ..... 1 b004912a8510 000000000000 000000000000 (re)
54 56
55 57 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now