localrepo: use commonancestorsheads for checking linear heritage in file commit...
Mads Kiilerich
r21106:53433d8f default
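The change replaces flog.commonancestors() with flog.commonancestorsheads() in localrepository._filecommit(). When committing a merged file, _filecommit() must decide whether one file parent is an ancestor of the other (linear heritage), in which case the second parent can be dropped. Only the heads of the common-ancestor set can ever contain one of the two parents, so computing just the heads answers the question without materializing the full ancestor set.

A minimal sketch of the test, assuming a revlog-like filelog flog; the helper name islinear and the nodes n1/n2 are hypothetical:

    def islinear(flog, n1, n2):
        # heads of the set of ancestors common to n1 and n2
        heads = flog.commonancestorsheads(n1, n2)
        if n1 in heads:
            return (n1, n2)  # n1 is an ancestor of n2
        if n2 in heads:
            return (n2, n1)  # n2 is an ancestor of n1
        return None          # the parents have truly diverged

If n1 is a common ancestor at all, then every common ancestor is also an ancestor of n1, making n1 the unique head of the set; membership in the heads is therefore equivalent to membership in the full commonancestors() result that the old code computed, at lower cost.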
@@ -1,1906 +1,1906 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock as lockmod
12 12 import transaction, store, encoding, exchange, bundle2
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 import branchmap, pathutil
20 20 propertycache = util.propertycache
21 21 filecache = scmutil.filecache
22 22
23 23 class repofilecache(filecache):
24 24 """All filecache usage on repo are done for logic that should be unfiltered
25 25 """
26 26
27 27 def __get__(self, repo, type=None):
28 28 return super(repofilecache, self).__get__(repo.unfiltered(), type)
29 29 def __set__(self, repo, value):
30 30 return super(repofilecache, self).__set__(repo.unfiltered(), value)
31 31 def __delete__(self, repo):
32 32 return super(repofilecache, self).__delete__(repo.unfiltered())
33 33
34 34 class storecache(repofilecache):
35 35 """filecache for files in the store"""
36 36 def join(self, obj, fname):
37 37 return obj.sjoin(fname)
38 38
39 39 class unfilteredpropertycache(propertycache):
40 40 """propertycache that apply to unfiltered repo only"""
41 41
42 42 def __get__(self, repo, type=None):
43 43 unfi = repo.unfiltered()
44 44 if unfi is repo:
45 45 return super(unfilteredpropertycache, self).__get__(unfi)
46 46 return getattr(unfi, self.name)
47 47
48 48 class filteredpropertycache(propertycache):
49 49 """propertycache that must take filtering in account"""
50 50
51 51 def cachevalue(self, obj, value):
52 52 object.__setattr__(obj, self.name, value)
53 53
54 54
55 55 def hasunfilteredcache(repo, name):
56 56 """check if a repo has an unfilteredpropertycache value for <name>"""
57 57 return name in vars(repo.unfiltered())
58 58
59 59 def unfilteredmethod(orig):
60 60 """decorate method that always need to be run on unfiltered version"""
61 61 def wrapper(repo, *args, **kwargs):
62 62 return orig(repo.unfiltered(), *args, **kwargs)
63 63 return wrapper
64 64
65 65 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
66 66 'bundle2', 'unbundle'))
67 67 legacycaps = moderncaps.union(set(['changegroupsubset']))
68 68
69 69 class localpeer(peer.peerrepository):
70 70 '''peer for a local repo; reflects only the most recent API'''
71 71
72 72 def __init__(self, repo, caps=moderncaps):
73 73 peer.peerrepository.__init__(self)
74 74 self._repo = repo.filtered('served')
75 75 self.ui = repo.ui
76 76 self._caps = repo._restrictcapabilities(caps)
77 77 self.requirements = repo.requirements
78 78 self.supportedformats = repo.supportedformats
79 79
80 80 def close(self):
81 81 self._repo.close()
82 82
83 83 def _capabilities(self):
84 84 return self._caps
85 85
86 86 def local(self):
87 87 return self._repo
88 88
89 89 def canpush(self):
90 90 return True
91 91
92 92 def url(self):
93 93 return self._repo.url()
94 94
95 95 def lookup(self, key):
96 96 return self._repo.lookup(key)
97 97
98 98 def branchmap(self):
99 99 return self._repo.branchmap()
100 100
101 101 def heads(self):
102 102 return self._repo.heads()
103 103
104 104 def known(self, nodes):
105 105 return self._repo.known(nodes)
106 106
107 107 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
108 108 format='HG10'):
109 109 cg = exchange.getbundle(self._repo, source, heads=heads,
110 110 common=common, bundlecaps=bundlecaps)
111 111 if bundlecaps is not None and 'HG20' in bundlecaps:
112 112 # When requesting a bundle2, getbundle returns a stream to make the
113 113 # wire level function happier. We need to build a proper object
114 114 # from it in local peer.
115 115 cg = bundle2.unbundle20(self.ui, cg)
116 116 return cg
117 117
118 118 # TODO We might want to move the next two calls into legacypeer and add
119 119 # unbundle instead.
120 120
121 121 def unbundle(self, cg, heads, url):
122 122 """apply a bundle on a repo
123 123
124 124 This function handles the repo locking itself."""
125 125 try:
126 126 cg = exchange.readbundle(self.ui, cg, None)
127 127 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
128 128 if util.safehasattr(ret, 'getchunks'):
129 129 # This is a bundle20 object, turn it into an unbundler.
130 130 # This little dance should be dropped eventually when the API
131 131 # is finally improved.
132 132 stream = util.chunkbuffer(ret.getchunks())
133 133 ret = bundle2.unbundle20(self.ui, stream)
134 134 return ret
135 135 except exchange.PushRaced, exc:
136 136 raise error.ResponseError(_('push failed:'), exc.message)
137 137
138 138 def lock(self):
139 139 return self._repo.lock()
140 140
141 141 def addchangegroup(self, cg, source, url):
142 142 return changegroup.addchangegroup(self._repo, cg, source, url)
143 143
144 144 def pushkey(self, namespace, key, old, new):
145 145 return self._repo.pushkey(namespace, key, old, new)
146 146
147 147 def listkeys(self, namespace):
148 148 return self._repo.listkeys(namespace)
149 149
150 150 def debugwireargs(self, one, two, three=None, four=None, five=None):
151 151 '''used to test argument passing over the wire'''
152 152 return "%s %s %s %s %s" % (one, two, three, four, five)
153 153
154 154 class locallegacypeer(localpeer):
155 155 '''peer extension which implements legacy methods too; used for tests with
156 156 restricted capabilities'''
157 157
158 158 def __init__(self, repo):
159 159 localpeer.__init__(self, repo, caps=legacycaps)
160 160
161 161 def branches(self, nodes):
162 162 return self._repo.branches(nodes)
163 163
164 164 def between(self, pairs):
165 165 return self._repo.between(pairs)
166 166
167 167 def changegroup(self, basenodes, source):
168 168 return changegroup.changegroup(self._repo, basenodes, source)
169 169
170 170 def changegroupsubset(self, bases, heads, source):
171 171 return changegroup.changegroupsubset(self._repo, bases, heads, source)
172 172
173 173 class localrepository(object):
174 174
175 175 supportedformats = set(('revlogv1', 'generaldelta'))
176 176 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
177 177 'dotencode'))
178 178 openerreqs = set(('revlogv1', 'generaldelta'))
179 179 requirements = ['revlogv1']
180 180 filtername = None
181 181
182 182 # a list of (ui, featureset) functions.
183 183 # only functions defined in module of enabled extensions are invoked
184 184 featuresetupfuncs = set()
185 185
186 186 def _baserequirements(self, create):
187 187 return self.requirements[:]
188 188
189 189 def __init__(self, baseui, path=None, create=False):
190 190 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
191 191 self.wopener = self.wvfs
192 192 self.root = self.wvfs.base
193 193 self.path = self.wvfs.join(".hg")
194 194 self.origroot = path
195 195 self.auditor = pathutil.pathauditor(self.root, self._checknested)
196 196 self.vfs = scmutil.vfs(self.path)
197 197 self.opener = self.vfs
198 198 self.baseui = baseui
199 199 self.ui = baseui.copy()
200 200 self.ui.copy = baseui.copy # prevent copying repo configuration
201 201 # A list of callbacks to shape the phase if no data were found.
202 202 # Callbacks are in the form: func(repo, roots) --> processed root.
203 203 # This list is to be filled by extensions during repo setup
204 204 self._phasedefaults = []
205 205 try:
206 206 self.ui.readconfig(self.join("hgrc"), self.root)
207 207 extensions.loadall(self.ui)
208 208 except IOError:
209 209 pass
210 210
211 211 if self.featuresetupfuncs:
212 212 self.supported = set(self._basesupported) # use private copy
213 213 extmods = set(m.__name__ for n, m
214 214 in extensions.extensions(self.ui))
215 215 for setupfunc in self.featuresetupfuncs:
216 216 if setupfunc.__module__ in extmods:
217 217 setupfunc(self.ui, self.supported)
218 218 else:
219 219 self.supported = self._basesupported
220 220
221 221 if not self.vfs.isdir():
222 222 if create:
223 223 if not self.wvfs.exists():
224 224 self.wvfs.makedirs()
225 225 self.vfs.makedir(notindexed=True)
226 226 requirements = self._baserequirements(create)
227 227 if self.ui.configbool('format', 'usestore', True):
228 228 self.vfs.mkdir("store")
229 229 requirements.append("store")
230 230 if self.ui.configbool('format', 'usefncache', True):
231 231 requirements.append("fncache")
232 232 if self.ui.configbool('format', 'dotencode', True):
233 233 requirements.append('dotencode')
234 234 # create an invalid changelog
235 235 self.vfs.append(
236 236 "00changelog.i",
237 237 '\0\0\0\2' # represents revlogv2
238 238 ' dummy changelog to prevent using the old repo layout'
239 239 )
240 240 if self.ui.configbool('format', 'generaldelta', False):
241 241 requirements.append("generaldelta")
242 242 requirements = set(requirements)
243 243 else:
244 244 raise error.RepoError(_("repository %s not found") % path)
245 245 elif create:
246 246 raise error.RepoError(_("repository %s already exists") % path)
247 247 else:
248 248 try:
249 249 requirements = scmutil.readrequires(self.vfs, self.supported)
250 250 except IOError, inst:
251 251 if inst.errno != errno.ENOENT:
252 252 raise
253 253 requirements = set()
254 254
255 255 self.sharedpath = self.path
256 256 try:
257 257 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
258 258 realpath=True)
259 259 s = vfs.base
260 260 if not vfs.exists():
261 261 raise error.RepoError(
262 262 _('.hg/sharedpath points to nonexistent directory %s') % s)
263 263 self.sharedpath = s
264 264 except IOError, inst:
265 265 if inst.errno != errno.ENOENT:
266 266 raise
267 267
268 268 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
269 269 self.spath = self.store.path
270 270 self.svfs = self.store.vfs
271 271 self.sopener = self.svfs
272 272 self.sjoin = self.store.join
273 273 self.vfs.createmode = self.store.createmode
274 274 self._applyrequirements(requirements)
275 275 if create:
276 276 self._writerequirements()
277 277
278 278
279 279 self._branchcaches = {}
280 280 self.filterpats = {}
281 281 self._datafilters = {}
282 282 self._transref = self._lockref = self._wlockref = None
283 283
284 284 # A cache for various files under .hg/ that tracks file changes,
285 285 # (used by the filecache decorator)
286 286 #
287 287 # Maps a property name to its util.filecacheentry
288 288 self._filecache = {}
289 289
290 290 # hold sets of revision to be filtered
291 291 # should be cleared when something might have changed the filter value:
292 292 # - new changesets,
293 293 # - phase change,
294 294 # - new obsolescence marker,
295 295 # - working directory parent change,
296 296 # - bookmark changes
297 297 self.filteredrevcache = {}
298 298
299 299 def close(self):
300 300 pass
301 301
302 302 def _restrictcapabilities(self, caps):
303 303 # bundle2 is not ready for prime time, drop it unless explicitly
304 304 # required by the tests (or some brave tester)
305 305 if not self.ui.configbool('server', 'bundle2', False):
306 306 caps = set(caps)
307 307 caps.discard('bundle2')
308 308 return caps
309 309
310 310 def _applyrequirements(self, requirements):
311 311 self.requirements = requirements
312 312 self.sopener.options = dict((r, 1) for r in requirements
313 313 if r in self.openerreqs)
314 314 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
315 315 if chunkcachesize is not None:
316 316 self.sopener.options['chunkcachesize'] = chunkcachesize
317 317
318 318 def _writerequirements(self):
319 319 reqfile = self.opener("requires", "w")
320 320 for r in sorted(self.requirements):
321 321 reqfile.write("%s\n" % r)
322 322 reqfile.close()
323 323
324 324 def _checknested(self, path):
325 325 """Determine if path is a legal nested repository."""
326 326 if not path.startswith(self.root):
327 327 return False
328 328 subpath = path[len(self.root) + 1:]
329 329 normsubpath = util.pconvert(subpath)
330 330
331 331 # XXX: Checking against the current working copy is wrong in
332 332 # the sense that it can reject things like
333 333 #
334 334 # $ hg cat -r 10 sub/x.txt
335 335 #
336 336 # if sub/ is no longer a subrepository in the working copy
337 337 # parent revision.
338 338 #
339 339 # However, it can of course also allow things that would have
340 340 # been rejected before, such as the above cat command if sub/
341 341 # is a subrepository now, but was a normal directory before.
342 342 # The old path auditor would have rejected by mistake since it
343 343 # panics when it sees sub/.hg/.
344 344 #
345 345 # All in all, checking against the working copy seems sensible
346 346 # since we want to prevent access to nested repositories on
347 347 # the filesystem *now*.
348 348 ctx = self[None]
349 349 parts = util.splitpath(subpath)
350 350 while parts:
351 351 prefix = '/'.join(parts)
352 352 if prefix in ctx.substate:
353 353 if prefix == normsubpath:
354 354 return True
355 355 else:
356 356 sub = ctx.sub(prefix)
357 357 return sub.checknested(subpath[len(prefix) + 1:])
358 358 else:
359 359 parts.pop()
360 360 return False
361 361
362 362 def peer(self):
363 363 return localpeer(self) # not cached to avoid reference cycle
364 364
365 365 def unfiltered(self):
366 366 """Return unfiltered version of the repository
367 367
368 368 Intended to be overwritten by filtered repo."""
369 369 return self
370 370
371 371 def filtered(self, name):
372 372 """Return a filtered version of a repository"""
373 373 # build a new class with the mixin and the current class
374 374 # (possibly subclass of the repo)
375 375 class proxycls(repoview.repoview, self.unfiltered().__class__):
376 376 pass
377 377 return proxycls(self, name)
378 378
379 379 @repofilecache('bookmarks')
380 380 def _bookmarks(self):
381 381 return bookmarks.bmstore(self)
382 382
383 383 @repofilecache('bookmarks.current')
384 384 def _bookmarkcurrent(self):
385 385 return bookmarks.readcurrent(self)
386 386
387 387 def bookmarkheads(self, bookmark):
388 388 name = bookmark.split('@', 1)[0]
389 389 heads = []
390 390 for mark, n in self._bookmarks.iteritems():
391 391 if mark.split('@', 1)[0] == name:
392 392 heads.append(n)
393 393 return heads
394 394
395 395 @storecache('phaseroots')
396 396 def _phasecache(self):
397 397 return phases.phasecache(self, self._phasedefaults)
398 398
399 399 @storecache('obsstore')
400 400 def obsstore(self):
401 401 store = obsolete.obsstore(self.sopener)
402 402 if store and not obsolete._enabled:
403 403 # message is rare enough to not be translated
404 404 msg = 'obsolete feature not enabled but %i markers found!\n'
405 405 self.ui.warn(msg % len(list(store)))
406 406 return store
407 407
408 408 @storecache('00changelog.i')
409 409 def changelog(self):
410 410 c = changelog.changelog(self.sopener)
411 411 if 'HG_PENDING' in os.environ:
412 412 p = os.environ['HG_PENDING']
413 413 if p.startswith(self.root):
414 414 c.readpending('00changelog.i.a')
415 415 return c
416 416
417 417 @storecache('00manifest.i')
418 418 def manifest(self):
419 419 return manifest.manifest(self.sopener)
420 420
421 421 @repofilecache('dirstate')
422 422 def dirstate(self):
423 423 warned = [0]
424 424 def validate(node):
425 425 try:
426 426 self.changelog.rev(node)
427 427 return node
428 428 except error.LookupError:
429 429 if not warned[0]:
430 430 warned[0] = True
431 431 self.ui.warn(_("warning: ignoring unknown"
432 432 " working parent %s!\n") % short(node))
433 433 return nullid
434 434
435 435 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
436 436
437 437 def __getitem__(self, changeid):
438 438 if changeid is None:
439 439 return context.workingctx(self)
440 440 return context.changectx(self, changeid)
441 441
442 442 def __contains__(self, changeid):
443 443 try:
444 444 return bool(self.lookup(changeid))
445 445 except error.RepoLookupError:
446 446 return False
447 447
448 448 def __nonzero__(self):
449 449 return True
450 450
451 451 def __len__(self):
452 452 return len(self.changelog)
453 453
454 454 def __iter__(self):
455 455 return iter(self.changelog)
456 456
457 457 def revs(self, expr, *args):
458 458 '''Return a list of revisions matching the given revset'''
459 459 expr = revset.formatspec(expr, *args)
460 460 m = revset.match(None, expr)
461 461 return m(self, revset.spanset(self))
462 462
463 463 def set(self, expr, *args):
464 464 '''
465 465 Yield a context for each matching revision, after doing arg
466 466 replacement via revset.formatspec
467 467 '''
468 468 for r in self.revs(expr, *args):
469 469 yield self[r]
470 470
471 471 def url(self):
472 472 return 'file:' + self.root
473 473
474 474 def hook(self, name, throw=False, **args):
475 475 return hook.hook(self.ui, self, name, throw, **args)
476 476
477 477 @unfilteredmethod
478 478 def _tag(self, names, node, message, local, user, date, extra={}):
479 479 if isinstance(names, str):
480 480 names = (names,)
481 481
482 482 branches = self.branchmap()
483 483 for name in names:
484 484 self.hook('pretag', throw=True, node=hex(node), tag=name,
485 485 local=local)
486 486 if name in branches:
487 487 self.ui.warn(_("warning: tag %s conflicts with existing"
488 488 " branch name\n") % name)
489 489
490 490 def writetags(fp, names, munge, prevtags):
491 491 fp.seek(0, 2)
492 492 if prevtags and prevtags[-1] != '\n':
493 493 fp.write('\n')
494 494 for name in names:
495 495 m = munge and munge(name) or name
496 496 if (self._tagscache.tagtypes and
497 497 name in self._tagscache.tagtypes):
498 498 old = self.tags().get(name, nullid)
499 499 fp.write('%s %s\n' % (hex(old), m))
500 500 fp.write('%s %s\n' % (hex(node), m))
501 501 fp.close()
502 502
503 503 prevtags = ''
504 504 if local:
505 505 try:
506 506 fp = self.opener('localtags', 'r+')
507 507 except IOError:
508 508 fp = self.opener('localtags', 'a')
509 509 else:
510 510 prevtags = fp.read()
511 511
512 512 # local tags are stored in the current charset
513 513 writetags(fp, names, None, prevtags)
514 514 for name in names:
515 515 self.hook('tag', node=hex(node), tag=name, local=local)
516 516 return
517 517
518 518 try:
519 519 fp = self.wfile('.hgtags', 'rb+')
520 520 except IOError, e:
521 521 if e.errno != errno.ENOENT:
522 522 raise
523 523 fp = self.wfile('.hgtags', 'ab')
524 524 else:
525 525 prevtags = fp.read()
526 526
527 527 # committed tags are stored in UTF-8
528 528 writetags(fp, names, encoding.fromlocal, prevtags)
529 529
530 530 fp.close()
531 531
532 532 self.invalidatecaches()
533 533
534 534 if '.hgtags' not in self.dirstate:
535 535 self[None].add(['.hgtags'])
536 536
537 537 m = matchmod.exact(self.root, '', ['.hgtags'])
538 538 tagnode = self.commit(message, user, date, extra=extra, match=m)
539 539
540 540 for name in names:
541 541 self.hook('tag', node=hex(node), tag=name, local=local)
542 542
543 543 return tagnode
544 544
545 545 def tag(self, names, node, message, local, user, date):
546 546 '''tag a revision with one or more symbolic names.
547 547
548 548 names is a list of strings or, when adding a single tag, names may be a
549 549 string.
550 550
551 551 if local is True, the tags are stored in a per-repository file.
552 552 otherwise, they are stored in the .hgtags file, and a new
553 553 changeset is committed with the change.
554 554
555 555 keyword arguments:
556 556
557 557 local: whether to store tags in non-version-controlled file
558 558 (default False)
559 559
560 560 message: commit message to use if committing
561 561
562 562 user: name of user to use if committing
563 563
564 564 date: date tuple to use if committing'''
565 565
566 566 if not local:
567 567 for x in self.status()[:5]:
568 568 if '.hgtags' in x:
569 569 raise util.Abort(_('working copy of .hgtags is changed '
570 570 '(please commit .hgtags manually)'))
571 571
572 572 self.tags() # instantiate the cache
573 573 self._tag(names, node, message, local, user, date)
574 574
575 575 @filteredpropertycache
576 576 def _tagscache(self):
577 577 '''Returns a tagscache object that contains various tags related
578 578 caches.'''
579 579
580 580 # This simplifies its cache management by having one decorated
581 581 # function (this one) and the rest simply fetch things from it.
582 582 class tagscache(object):
583 583 def __init__(self):
584 584 # These two define the set of tags for this repository. tags
585 585 # maps tag name to node; tagtypes maps tag name to 'global' or
586 586 # 'local'. (Global tags are defined by .hgtags across all
587 587 # heads, and local tags are defined in .hg/localtags.)
588 588 # They constitute the in-memory cache of tags.
589 589 self.tags = self.tagtypes = None
590 590
591 591 self.nodetagscache = self.tagslist = None
592 592
593 593 cache = tagscache()
594 594 cache.tags, cache.tagtypes = self._findtags()
595 595
596 596 return cache
597 597
598 598 def tags(self):
599 599 '''return a mapping of tag to node'''
600 600 t = {}
601 601 if self.changelog.filteredrevs:
602 602 tags, tt = self._findtags()
603 603 else:
604 604 tags = self._tagscache.tags
605 605 for k, v in tags.iteritems():
606 606 try:
607 607 # ignore tags to unknown nodes
608 608 self.changelog.rev(v)
609 609 t[k] = v
610 610 except (error.LookupError, ValueError):
611 611 pass
612 612 return t
613 613
614 614 def _findtags(self):
615 615 '''Do the hard work of finding tags. Return a pair of dicts
616 616 (tags, tagtypes) where tags maps tag name to node, and tagtypes
617 617 maps tag name to a string like \'global\' or \'local\'.
618 618 Subclasses or extensions are free to add their own tags, but
619 619 should be aware that the returned dicts will be retained for the
620 620 duration of the localrepo object.'''
621 621
622 622 # XXX what tagtype should subclasses/extensions use? Currently
623 623 # mq and bookmarks add tags, but do not set the tagtype at all.
624 624 # Should each extension invent its own tag type? Should there
625 625 # be one tagtype for all such "virtual" tags? Or is the status
626 626 # quo fine?
627 627
628 628 alltags = {} # map tag name to (node, hist)
629 629 tagtypes = {}
630 630
631 631 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
632 632 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
633 633
634 634 # Build the return dicts. Have to re-encode tag names because
635 635 # the tags module always uses UTF-8 (in order not to lose info
636 636 # writing to the cache), but the rest of Mercurial wants them in
637 637 # local encoding.
638 638 tags = {}
639 639 for (name, (node, hist)) in alltags.iteritems():
640 640 if node != nullid:
641 641 tags[encoding.tolocal(name)] = node
642 642 tags['tip'] = self.changelog.tip()
643 643 tagtypes = dict([(encoding.tolocal(name), value)
644 644 for (name, value) in tagtypes.iteritems()])
645 645 return (tags, tagtypes)
646 646
647 647 def tagtype(self, tagname):
648 648 '''
649 649 return the type of the given tag. result can be:
650 650
651 651 'local' : a local tag
652 652 'global' : a global tag
653 653 None : tag does not exist
654 654 '''
655 655
656 656 return self._tagscache.tagtypes.get(tagname)
657 657
658 658 def tagslist(self):
659 659 '''return a list of tags ordered by revision'''
660 660 if not self._tagscache.tagslist:
661 661 l = []
662 662 for t, n in self.tags().iteritems():
663 663 r = self.changelog.rev(n)
664 664 l.append((r, t, n))
665 665 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
666 666
667 667 return self._tagscache.tagslist
668 668
669 669 def nodetags(self, node):
670 670 '''return the tags associated with a node'''
671 671 if not self._tagscache.nodetagscache:
672 672 nodetagscache = {}
673 673 for t, n in self._tagscache.tags.iteritems():
674 674 nodetagscache.setdefault(n, []).append(t)
675 675 for tags in nodetagscache.itervalues():
676 676 tags.sort()
677 677 self._tagscache.nodetagscache = nodetagscache
678 678 return self._tagscache.nodetagscache.get(node, [])
679 679
680 680 def nodebookmarks(self, node):
681 681 marks = []
682 682 for bookmark, n in self._bookmarks.iteritems():
683 683 if n == node:
684 684 marks.append(bookmark)
685 685 return sorted(marks)
686 686
687 687 def branchmap(self):
688 688 '''returns a dictionary {branch: [branchheads]} with branchheads
689 689 ordered by increasing revision number'''
690 690 branchmap.updatecache(self)
691 691 return self._branchcaches[self.filtername]
692 692
693 693 def branchtip(self, branch):
694 694 '''return the tip node for a given branch'''
695 695 try:
696 696 return self.branchmap().branchtip(branch)
697 697 except KeyError:
698 698 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
699 699
700 700 def lookup(self, key):
701 701 return self[key].node()
702 702
703 703 def lookupbranch(self, key, remote=None):
704 704 repo = remote or self
705 705 if key in repo.branchmap():
706 706 return key
707 707
708 708 repo = (remote and remote.local()) and remote or self
709 709 return repo[key].branch()
710 710
711 711 def known(self, nodes):
712 712 nm = self.changelog.nodemap
713 713 pc = self._phasecache
714 714 result = []
715 715 for n in nodes:
716 716 r = nm.get(n)
717 717 resp = not (r is None or pc.phase(self, r) >= phases.secret)
718 718 result.append(resp)
719 719 return result
720 720
721 721 def local(self):
722 722 return self
723 723
724 724 def cancopy(self):
725 725 # so statichttprepo's override of local() works
726 726 if not self.local():
727 727 return False
728 728 if not self.ui.configbool('phases', 'publish', True):
729 729 return True
730 730 # if publishing we can't copy if there is filtered content
731 731 return not self.filtered('visible').changelog.filteredrevs
732 732
733 733 def join(self, f):
734 734 return os.path.join(self.path, f)
735 735
736 736 def wjoin(self, f):
737 737 return os.path.join(self.root, f)
738 738
739 739 def file(self, f):
740 740 if f[0] == '/':
741 741 f = f[1:]
742 742 return filelog.filelog(self.sopener, f)
743 743
744 744 def changectx(self, changeid):
745 745 return self[changeid]
746 746
747 747 def parents(self, changeid=None):
748 748 '''get list of changectxs for parents of changeid'''
749 749 return self[changeid].parents()
750 750
751 751 def setparents(self, p1, p2=nullid):
752 752 copies = self.dirstate.setparents(p1, p2)
753 753 pctx = self[p1]
754 754 if copies:
755 755 # Adjust copy records, the dirstate cannot do it, it
756 756 # requires access to parents manifests. Preserve them
757 757 # only for entries added to first parent.
758 758 for f in copies:
759 759 if f not in pctx and copies[f] in pctx:
760 760 self.dirstate.copy(copies[f], f)
761 761 if p2 == nullid:
762 762 for f, s in sorted(self.dirstate.copies().items()):
763 763 if f not in pctx and s not in pctx:
764 764 self.dirstate.copy(None, f)
765 765
766 766 def filectx(self, path, changeid=None, fileid=None):
767 767 """changeid can be a changeset revision, node, or tag.
768 768 fileid can be a file revision or node."""
769 769 return context.filectx(self, path, changeid, fileid)
770 770
771 771 def getcwd(self):
772 772 return self.dirstate.getcwd()
773 773
774 774 def pathto(self, f, cwd=None):
775 775 return self.dirstate.pathto(f, cwd)
776 776
777 777 def wfile(self, f, mode='r'):
778 778 return self.wopener(f, mode)
779 779
780 780 def _link(self, f):
781 781 return self.wvfs.islink(f)
782 782
783 783 def _loadfilter(self, filter):
784 784 if filter not in self.filterpats:
785 785 l = []
786 786 for pat, cmd in self.ui.configitems(filter):
787 787 if cmd == '!':
788 788 continue
789 789 mf = matchmod.match(self.root, '', [pat])
790 790 fn = None
791 791 params = cmd
792 792 for name, filterfn in self._datafilters.iteritems():
793 793 if cmd.startswith(name):
794 794 fn = filterfn
795 795 params = cmd[len(name):].lstrip()
796 796 break
797 797 if not fn:
798 798 fn = lambda s, c, **kwargs: util.filter(s, c)
799 799 # Wrap old filters not supporting keyword arguments
800 800 if not inspect.getargspec(fn)[2]:
801 801 oldfn = fn
802 802 fn = lambda s, c, **kwargs: oldfn(s, c)
803 803 l.append((mf, fn, params))
804 804 self.filterpats[filter] = l
805 805 return self.filterpats[filter]
806 806
807 807 def _filter(self, filterpats, filename, data):
808 808 for mf, fn, cmd in filterpats:
809 809 if mf(filename):
810 810 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
811 811 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
812 812 break
813 813
814 814 return data
815 815
816 816 @unfilteredpropertycache
817 817 def _encodefilterpats(self):
818 818 return self._loadfilter('encode')
819 819
820 820 @unfilteredpropertycache
821 821 def _decodefilterpats(self):
822 822 return self._loadfilter('decode')
823 823
824 824 def adddatafilter(self, name, filter):
825 825 self._datafilters[name] = filter
826 826
827 827 def wread(self, filename):
828 828 if self._link(filename):
829 829 data = self.wvfs.readlink(filename)
830 830 else:
831 831 data = self.wopener.read(filename)
832 832 return self._filter(self._encodefilterpats, filename, data)
833 833
834 834 def wwrite(self, filename, data, flags):
835 835 data = self._filter(self._decodefilterpats, filename, data)
836 836 if 'l' in flags:
837 837 self.wopener.symlink(data, filename)
838 838 else:
839 839 self.wopener.write(filename, data)
840 840 if 'x' in flags:
841 841 self.wvfs.setflags(filename, False, True)
842 842
843 843 def wwritedata(self, filename, data):
844 844 return self._filter(self._decodefilterpats, filename, data)
845 845
846 846 def transaction(self, desc, report=None):
847 847 tr = self._transref and self._transref() or None
848 848 if tr and tr.running():
849 849 return tr.nest()
850 850
851 851 # abort here if the journal already exists
852 852 if self.svfs.exists("journal"):
853 853 raise error.RepoError(
854 854 _("abandoned transaction found - run hg recover"))
855 855
856 856 def onclose():
857 857 self.store.write(tr)
858 858
859 859 self._writejournal(desc)
860 860 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
861 861 rp = report and report or self.ui.warn
862 862 tr = transaction.transaction(rp, self.sopener,
863 863 "journal",
864 864 aftertrans(renames),
865 865 self.store.createmode,
866 866 onclose)
867 867 self._transref = weakref.ref(tr)
868 868 return tr
869 869
870 870 def _journalfiles(self):
871 871 return ((self.svfs, 'journal'),
872 872 (self.vfs, 'journal.dirstate'),
873 873 (self.vfs, 'journal.branch'),
874 874 (self.vfs, 'journal.desc'),
875 875 (self.vfs, 'journal.bookmarks'),
876 876 (self.svfs, 'journal.phaseroots'))
877 877
878 878 def undofiles(self):
879 879 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
880 880
881 881 def _writejournal(self, desc):
882 882 self.opener.write("journal.dirstate",
883 883 self.opener.tryread("dirstate"))
884 884 self.opener.write("journal.branch",
885 885 encoding.fromlocal(self.dirstate.branch()))
886 886 self.opener.write("journal.desc",
887 887 "%d\n%s\n" % (len(self), desc))
888 888 self.opener.write("journal.bookmarks",
889 889 self.opener.tryread("bookmarks"))
890 890 self.sopener.write("journal.phaseroots",
891 891 self.sopener.tryread("phaseroots"))
892 892
893 893 def recover(self):
894 894 lock = self.lock()
895 895 try:
896 896 if self.svfs.exists("journal"):
897 897 self.ui.status(_("rolling back interrupted transaction\n"))
898 898 transaction.rollback(self.sopener, "journal",
899 899 self.ui.warn)
900 900 self.invalidate()
901 901 return True
902 902 else:
903 903 self.ui.warn(_("no interrupted transaction available\n"))
904 904 return False
905 905 finally:
906 906 lock.release()
907 907
908 908 def rollback(self, dryrun=False, force=False):
909 909 wlock = lock = None
910 910 try:
911 911 wlock = self.wlock()
912 912 lock = self.lock()
913 913 if self.svfs.exists("undo"):
914 914 return self._rollback(dryrun, force)
915 915 else:
916 916 self.ui.warn(_("no rollback information available\n"))
917 917 return 1
918 918 finally:
919 919 release(lock, wlock)
920 920
921 921 @unfilteredmethod # Until we get smarter cache management
922 922 def _rollback(self, dryrun, force):
923 923 ui = self.ui
924 924 try:
925 925 args = self.opener.read('undo.desc').splitlines()
926 926 (oldlen, desc, detail) = (int(args[0]), args[1], None)
927 927 if len(args) >= 3:
928 928 detail = args[2]
929 929 oldtip = oldlen - 1
930 930
931 931 if detail and ui.verbose:
932 932 msg = (_('repository tip rolled back to revision %s'
933 933 ' (undo %s: %s)\n')
934 934 % (oldtip, desc, detail))
935 935 else:
936 936 msg = (_('repository tip rolled back to revision %s'
937 937 ' (undo %s)\n')
938 938 % (oldtip, desc))
939 939 except IOError:
940 940 msg = _('rolling back unknown transaction\n')
941 941 desc = None
942 942
943 943 if not force and self['.'] != self['tip'] and desc == 'commit':
944 944 raise util.Abort(
945 945 _('rollback of last commit while not checked out '
946 946 'may lose data'), hint=_('use -f to force'))
947 947
948 948 ui.status(msg)
949 949 if dryrun:
950 950 return 0
951 951
952 952 parents = self.dirstate.parents()
953 953 self.destroying()
954 954 transaction.rollback(self.sopener, 'undo', ui.warn)
955 955 if self.vfs.exists('undo.bookmarks'):
956 956 self.vfs.rename('undo.bookmarks', 'bookmarks')
957 957 if self.svfs.exists('undo.phaseroots'):
958 958 self.svfs.rename('undo.phaseroots', 'phaseroots')
959 959 self.invalidate()
960 960
961 961 parentgone = (parents[0] not in self.changelog.nodemap or
962 962 parents[1] not in self.changelog.nodemap)
963 963 if parentgone:
964 964 self.vfs.rename('undo.dirstate', 'dirstate')
965 965 try:
966 966 branch = self.opener.read('undo.branch')
967 967 self.dirstate.setbranch(encoding.tolocal(branch))
968 968 except IOError:
969 969 ui.warn(_('named branch could not be reset: '
970 970 'current branch is still \'%s\'\n')
971 971 % self.dirstate.branch())
972 972
973 973 self.dirstate.invalidate()
974 974 parents = tuple([p.rev() for p in self.parents()])
975 975 if len(parents) > 1:
976 976 ui.status(_('working directory now based on '
977 977 'revisions %d and %d\n') % parents)
978 978 else:
979 979 ui.status(_('working directory now based on '
980 980 'revision %d\n') % parents)
981 981 # TODO: if we know which new heads may result from this rollback, pass
982 982 # them to destroy(), which will prevent the branchhead cache from being
983 983 # invalidated.
984 984 self.destroyed()
985 985 return 0
986 986
987 987 def invalidatecaches(self):
988 988
989 989 if '_tagscache' in vars(self):
990 990 # can't use delattr on proxy
991 991 del self.__dict__['_tagscache']
992 992
993 993 self.unfiltered()._branchcaches.clear()
994 994 self.invalidatevolatilesets()
995 995
996 996 def invalidatevolatilesets(self):
997 997 self.filteredrevcache.clear()
998 998 obsolete.clearobscaches(self)
999 999
1000 1000 def invalidatedirstate(self):
1001 1001 '''Invalidates the dirstate, causing the next call to dirstate
1002 1002 to check if it was modified since the last time it was read,
1003 1003 rereading it if it has.
1004 1004
1005 1005 This differs from dirstate.invalidate() in that it doesn't always
1006 1006 reread the dirstate. Use dirstate.invalidate() if you want to
1007 1007 explicitly read the dirstate again (i.e. restoring it to a previous
1008 1008 known good state).'''
1009 1009 if hasunfilteredcache(self, 'dirstate'):
1010 1010 for k in self.dirstate._filecache:
1011 1011 try:
1012 1012 delattr(self.dirstate, k)
1013 1013 except AttributeError:
1014 1014 pass
1015 1015 delattr(self.unfiltered(), 'dirstate')
1016 1016
1017 1017 def invalidate(self):
1018 1018 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1019 1019 for k in self._filecache:
1020 1020 # dirstate is invalidated separately in invalidatedirstate()
1021 1021 if k == 'dirstate':
1022 1022 continue
1023 1023
1024 1024 try:
1025 1025 delattr(unfiltered, k)
1026 1026 except AttributeError:
1027 1027 pass
1028 1028 self.invalidatecaches()
1029 1029 self.store.invalidatecaches()
1030 1030
1031 1031 def invalidateall(self):
1032 1032 '''Fully invalidates both store and non-store parts, causing the
1033 1033 subsequent operation to reread any outside changes.'''
1034 1034 # extension should hook this to invalidate its caches
1035 1035 self.invalidate()
1036 1036 self.invalidatedirstate()
1037 1037
1038 1038 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1039 1039 try:
1040 1040 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1041 1041 except error.LockHeld, inst:
1042 1042 if not wait:
1043 1043 raise
1044 1044 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1045 1045 (desc, inst.locker))
1046 1046 # default to 600 seconds timeout
1047 1047 l = lockmod.lock(vfs, lockname,
1048 1048 int(self.ui.config("ui", "timeout", "600")),
1049 1049 releasefn, desc=desc)
1050 1050 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1051 1051 if acquirefn:
1052 1052 acquirefn()
1053 1053 return l
1054 1054
1055 1055 def _afterlock(self, callback):
1056 1056 """add a callback to the current repository lock.
1057 1057
1058 1058 The callback will be executed on lock release."""
1059 1059 l = self._lockref and self._lockref()
1060 1060 if l:
1061 1061 l.postrelease.append(callback)
1062 1062 else:
1063 1063 callback()
1064 1064
1065 1065 def lock(self, wait=True):
1066 1066 '''Lock the repository store (.hg/store) and return a weak reference
1067 1067 to the lock. Use this before modifying the store (e.g. committing or
1068 1068 stripping). If you are opening a transaction, get a lock as well.'''
1069 1069 l = self._lockref and self._lockref()
1070 1070 if l is not None and l.held:
1071 1071 l.lock()
1072 1072 return l
1073 1073
1074 1074 def unlock():
1075 1075 if hasunfilteredcache(self, '_phasecache'):
1076 1076 self._phasecache.write()
1077 1077 for k, ce in self._filecache.items():
1078 1078 if k == 'dirstate' or k not in self.__dict__:
1079 1079 continue
1080 1080 ce.refresh()
1081 1081
1082 1082 l = self._lock(self.svfs, "lock", wait, unlock,
1083 1083 self.invalidate, _('repository %s') % self.origroot)
1084 1084 self._lockref = weakref.ref(l)
1085 1085 return l
1086 1086
1087 1087 def wlock(self, wait=True):
1088 1088 '''Lock the non-store parts of the repository (everything under
1089 1089 .hg except .hg/store) and return a weak reference to the lock.
1090 1090 Use this before modifying files in .hg.'''
1091 1091 l = self._wlockref and self._wlockref()
1092 1092 if l is not None and l.held:
1093 1093 l.lock()
1094 1094 return l
1095 1095
1096 1096 def unlock():
1097 1097 self.dirstate.write()
1098 1098 self._filecache['dirstate'].refresh()
1099 1099
1100 1100 l = self._lock(self.vfs, "wlock", wait, unlock,
1101 1101 self.invalidatedirstate, _('working directory of %s') %
1102 1102 self.origroot)
1103 1103 self._wlockref = weakref.ref(l)
1104 1104 return l
1105 1105
1106 1106 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1107 1107 """
1108 1108 commit an individual file as part of a larger transaction
1109 1109 """
1110 1110
1111 1111 fname = fctx.path()
1112 1112 text = fctx.data()
1113 1113 flog = self.file(fname)
1114 1114 fparent1 = manifest1.get(fname, nullid)
1115 1115 fparent2 = fparent2o = manifest2.get(fname, nullid)
1116 1116
1117 1117 meta = {}
1118 1118 copy = fctx.renamed()
1119 1119 if copy and copy[0] != fname:
1120 1120 # Mark the new revision of this file as a copy of another
1121 1121 # file. This copy data will effectively act as a parent
1122 1122 # of this new revision. If this is a merge, the first
1123 1123 # parent will be the nullid (meaning "look up the copy data")
1124 1124 # and the second one will be the other parent. For example:
1125 1125 #
1126 1126 # 0 --- 1 --- 3 rev1 changes file foo
1127 1127 # \ / rev2 renames foo to bar and changes it
1128 1128 # \- 2 -/ rev3 should have bar with all changes and
1129 1129 # should record that bar descends from
1130 1130 # bar in rev2 and foo in rev1
1131 1131 #
1132 1132 # this allows this merge to succeed:
1133 1133 #
1134 1134 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1135 1135 # \ / merging rev3 and rev4 should use bar@rev2
1136 1136 # \- 2 --- 4 as the merge base
1137 1137 #
1138 1138
1139 1139 cfname = copy[0]
1140 1140 crev = manifest1.get(cfname)
1141 1141 newfparent = fparent2
1142 1142
1143 1143 if manifest2: # branch merge
1144 1144 if fparent2 == nullid or crev is None: # copied on remote side
1145 1145 if cfname in manifest2:
1146 1146 crev = manifest2[cfname]
1147 1147 newfparent = fparent1
1148 1148
1149 1149 # find source in nearest ancestor if we've lost track
1150 1150 if not crev:
1151 1151 self.ui.debug(" %s: searching for copy revision for %s\n" %
1152 1152 (fname, cfname))
1153 1153 for ancestor in self[None].ancestors():
1154 1154 if cfname in ancestor:
1155 1155 crev = ancestor[cfname].filenode()
1156 1156 break
1157 1157
1158 1158 if crev:
1159 1159 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1160 1160 meta["copy"] = cfname
1161 1161 meta["copyrev"] = hex(crev)
1162 1162 fparent1, fparent2 = nullid, newfparent
1163 1163 else:
1164 1164 self.ui.warn(_("warning: can't find ancestor for '%s' "
1165 1165 "copied from '%s'!\n") % (fname, cfname))
1166 1166
1167 1167 elif fparent1 == nullid:
1168 1168 fparent1, fparent2 = fparent2, nullid
1169 1169 elif fparent2 != nullid:
1170 1170 # is one parent an ancestor of the other?
1171 fparentancestors = flog.commonancestors(fparent1, fparent2)
1171 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1172 1172 if fparent1 in fparentancestors:
1173 1173 fparent1, fparent2 = fparent2, nullid
1174 1174 elif fparent2 in fparentancestors:
1175 1175 fparent2 = nullid
1176 1176
1177 1177 # is the file changed?
1178 1178 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1179 1179 changelist.append(fname)
1180 1180 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1181 1181
1182 1182 # are just the flags changed during merge?
1183 1183 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1184 1184 changelist.append(fname)
1185 1185
1186 1186 return fparent1
1187 1187
1188 1188 @unfilteredmethod
1189 1189 def commit(self, text="", user=None, date=None, match=None, force=False,
1190 1190 editor=False, extra={}):
1191 1191 """Add a new revision to current repository.
1192 1192
1193 1193 Revision information is gathered from the working directory,
1194 1194 match can be used to filter the committed files. If editor is
1195 1195 supplied, it is called to get a commit message.
1196 1196 """
1197 1197
1198 1198 def fail(f, msg):
1199 1199 raise util.Abort('%s: %s' % (f, msg))
1200 1200
1201 1201 if not match:
1202 1202 match = matchmod.always(self.root, '')
1203 1203
1204 1204 if not force:
1205 1205 vdirs = []
1206 1206 match.explicitdir = vdirs.append
1207 1207 match.bad = fail
1208 1208
1209 1209 wlock = self.wlock()
1210 1210 try:
1211 1211 wctx = self[None]
1212 1212 merge = len(wctx.parents()) > 1
1213 1213
1214 1214 if (not force and merge and match and
1215 1215 (match.files() or match.anypats())):
1216 1216 raise util.Abort(_('cannot partially commit a merge '
1217 1217 '(do not specify files or patterns)'))
1218 1218
1219 1219 changes = self.status(match=match, clean=force)
1220 1220 if force:
1221 1221 changes[0].extend(changes[6]) # mq may commit unchanged files
1222 1222
1223 1223 # check subrepos
1224 1224 subs = []
1225 1225 commitsubs = set()
1226 1226 newstate = wctx.substate.copy()
1227 1227 # only manage subrepos and .hgsubstate if .hgsub is present
1228 1228 if '.hgsub' in wctx:
1229 1229 # we'll decide whether to track this ourselves, thanks
1230 1230 for c in changes[:3]:
1231 1231 if '.hgsubstate' in c:
1232 1232 c.remove('.hgsubstate')
1233 1233
1234 1234 # compare current state to last committed state
1235 1235 # build new substate based on last committed state
1236 1236 oldstate = wctx.p1().substate
1237 1237 for s in sorted(newstate.keys()):
1238 1238 if not match(s):
1239 1239 # ignore working copy, use old state if present
1240 1240 if s in oldstate:
1241 1241 newstate[s] = oldstate[s]
1242 1242 continue
1243 1243 if not force:
1244 1244 raise util.Abort(
1245 1245 _("commit with new subrepo %s excluded") % s)
1246 1246 if wctx.sub(s).dirty(True):
1247 1247 if not self.ui.configbool('ui', 'commitsubrepos'):
1248 1248 raise util.Abort(
1249 1249 _("uncommitted changes in subrepo %s") % s,
1250 1250 hint=_("use --subrepos for recursive commit"))
1251 1251 subs.append(s)
1252 1252 commitsubs.add(s)
1253 1253 else:
1254 1254 bs = wctx.sub(s).basestate()
1255 1255 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1256 1256 if oldstate.get(s, (None, None, None))[1] != bs:
1257 1257 subs.append(s)
1258 1258
1259 1259 # check for removed subrepos
1260 1260 for p in wctx.parents():
1261 1261 r = [s for s in p.substate if s not in newstate]
1262 1262 subs += [s for s in r if match(s)]
1263 1263 if subs:
1264 1264 if (not match('.hgsub') and
1265 1265 '.hgsub' in (wctx.modified() + wctx.added())):
1266 1266 raise util.Abort(
1267 1267 _("can't commit subrepos without .hgsub"))
1268 1268 changes[0].insert(0, '.hgsubstate')
1269 1269
1270 1270 elif '.hgsub' in changes[2]:
1271 1271 # clean up .hgsubstate when .hgsub is removed
1272 1272 if ('.hgsubstate' in wctx and
1273 1273 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1274 1274 changes[2].insert(0, '.hgsubstate')
1275 1275
1276 1276 # make sure all explicit patterns are matched
1277 1277 if not force and match.files():
1278 1278 matched = set(changes[0] + changes[1] + changes[2])
1279 1279
1280 1280 for f in match.files():
1281 1281 f = self.dirstate.normalize(f)
1282 1282 if f == '.' or f in matched or f in wctx.substate:
1283 1283 continue
1284 1284 if f in changes[3]: # missing
1285 1285 fail(f, _('file not found!'))
1286 1286 if f in vdirs: # visited directory
1287 1287 d = f + '/'
1288 1288 for mf in matched:
1289 1289 if mf.startswith(d):
1290 1290 break
1291 1291 else:
1292 1292 fail(f, _("no match under directory!"))
1293 1293 elif f not in self.dirstate:
1294 1294 fail(f, _("file not tracked!"))
1295 1295
1296 1296 cctx = context.workingctx(self, text, user, date, extra, changes)
1297 1297
1298 1298 if (not force and not extra.get("close") and not merge
1299 1299 and not cctx.files()
1300 1300 and wctx.branch() == wctx.p1().branch()):
1301 1301 return None
1302 1302
1303 1303 if merge and cctx.deleted():
1304 1304 raise util.Abort(_("cannot commit merge with missing files"))
1305 1305
1306 1306 ms = mergemod.mergestate(self)
1307 1307 for f in changes[0]:
1308 1308 if f in ms and ms[f] == 'u':
1309 1309 raise util.Abort(_("unresolved merge conflicts "
1310 1310 "(see hg help resolve)"))
1311 1311
1312 1312 if editor:
1313 1313 cctx._text = editor(self, cctx, subs)
1314 1314 edited = (text != cctx._text)
1315 1315
1316 1316 # Save commit message in case this transaction gets rolled back
1317 1317 # (e.g. by a pretxncommit hook). Leave the content alone on
1318 1318 # the assumption that the user will use the same editor again.
1319 1319 msgfn = self.savecommitmessage(cctx._text)
1320 1320
1321 1321 # commit subs and write new state
1322 1322 if subs:
1323 1323 for s in sorted(commitsubs):
1324 1324 sub = wctx.sub(s)
1325 1325 self.ui.status(_('committing subrepository %s\n') %
1326 1326 subrepo.subrelpath(sub))
1327 1327 sr = sub.commit(cctx._text, user, date)
1328 1328 newstate[s] = (newstate[s][0], sr)
1329 1329 subrepo.writestate(self, newstate)
1330 1330
1331 1331 p1, p2 = self.dirstate.parents()
1332 1332 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1333 1333 try:
1334 1334 self.hook("precommit", throw=True, parent1=hookp1,
1335 1335 parent2=hookp2)
1336 1336 ret = self.commitctx(cctx, True)
1337 1337 except: # re-raises
1338 1338 if edited:
1339 1339 self.ui.write(
1340 1340 _('note: commit message saved in %s\n') % msgfn)
1341 1341 raise
1342 1342
1343 1343 # update bookmarks, dirstate and mergestate
1344 1344 bookmarks.update(self, [p1, p2], ret)
1345 1345 cctx.markcommitted(ret)
1346 1346 ms.reset()
1347 1347 finally:
1348 1348 wlock.release()
1349 1349
1350 1350 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1351 1351 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1352 1352 self._afterlock(commithook)
1353 1353 return ret
1354 1354
1355 1355 @unfilteredmethod
1356 1356 def commitctx(self, ctx, error=False):
1357 1357 """Add a new revision to current repository.
1358 1358 Revision information is passed via the context argument.
1359 1359 """
1360 1360
1361 1361 tr = lock = None
1362 1362 removed = list(ctx.removed())
1363 1363 p1, p2 = ctx.p1(), ctx.p2()
1364 1364 user = ctx.user()
1365 1365
1366 1366 lock = self.lock()
1367 1367 try:
1368 1368 tr = self.transaction("commit")
1369 1369 trp = weakref.proxy(tr)
1370 1370
1371 1371 if ctx.files():
1372 1372 m1 = p1.manifest().copy()
1373 1373 m2 = p2.manifest()
1374 1374
1375 1375 # check in files
1376 1376 new = {}
1377 1377 changed = []
1378 1378 linkrev = len(self)
1379 1379 for f in sorted(ctx.modified() + ctx.added()):
1380 1380 self.ui.note(f + "\n")
1381 1381 try:
1382 1382 fctx = ctx[f]
1383 1383 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1384 1384 changed)
1385 1385 m1.set(f, fctx.flags())
1386 1386 except OSError, inst:
1387 1387 self.ui.warn(_("trouble committing %s!\n") % f)
1388 1388 raise
1389 1389 except IOError, inst:
1390 1390 errcode = getattr(inst, 'errno', errno.ENOENT)
1391 1391 if error or errcode and errcode != errno.ENOENT:
1392 1392 self.ui.warn(_("trouble committing %s!\n") % f)
1393 1393 raise
1394 1394 else:
1395 1395 removed.append(f)
1396 1396
1397 1397 # update manifest
1398 1398 m1.update(new)
1399 1399 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1400 1400 drop = [f for f in removed if f in m1]
1401 1401 for f in drop:
1402 1402 del m1[f]
1403 1403 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1404 1404 p2.manifestnode(), (new, drop))
1405 1405 files = changed + removed
1406 1406 else:
1407 1407 mn = p1.manifestnode()
1408 1408 files = []
1409 1409
1410 1410 # update changelog
1411 1411 self.changelog.delayupdate()
1412 1412 n = self.changelog.add(mn, files, ctx.description(),
1413 1413 trp, p1.node(), p2.node(),
1414 1414 user, ctx.date(), ctx.extra().copy())
1415 1415 p = lambda: self.changelog.writepending() and self.root or ""
1416 1416 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1417 1417 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1418 1418 parent2=xp2, pending=p)
1419 1419 self.changelog.finalize(trp)
1420 1420 # set the new commit in its proper phase
1421 1421 targetphase = subrepo.newcommitphase(self.ui, ctx)
1422 1422 if targetphase:
1423 1423 # retracting the boundary does not alter the parent changesets.
1424 1424 # if a parent has a higher phase, the resulting phase will
1425 1425 # be compliant anyway
1426 1426 #
1427 1427 # if minimal phase was 0 we don't need to retract anything
1428 1428 phases.retractboundary(self, targetphase, [n])
1429 1429 tr.close()
1430 1430 branchmap.updatecache(self.filtered('served'))
1431 1431 return n
1432 1432 finally:
1433 1433 if tr:
1434 1434 tr.release()
1435 1435 lock.release()
1436 1436
1437 1437 @unfilteredmethod
1438 1438 def destroying(self):
1439 1439 '''Inform the repository that nodes are about to be destroyed.
1440 1440 Intended for use by strip and rollback, so there's a common
1441 1441 place for anything that has to be done before destroying history.
1442 1442
1443 1443 This is mostly useful for saving state that is in memory and waiting
1444 1444 to be flushed when the current lock is released. Because a call to
1445 1445 destroyed is imminent, the repo will be invalidated causing those
1446 1446 changes to stay in memory (waiting for the next unlock), or vanish
1447 1447 completely.
1448 1448 '''
1449 1449 # When using the same lock to commit and strip, the phasecache is left
1450 1450 # dirty after committing. Then when we strip, the repo is invalidated,
1451 1451 # causing those changes to disappear.
1452 1452 if '_phasecache' in vars(self):
1453 1453 self._phasecache.write()
1454 1454
1455 1455 @unfilteredmethod
1456 1456 def destroyed(self):
1457 1457 '''Inform the repository that nodes have been destroyed.
1458 1458 Intended for use by strip and rollback, so there's a common
1459 1459 place for anything that has to be done after destroying history.
1460 1460 '''
1461 1461 # When one tries to:
1462 1462 # 1) destroy nodes thus calling this method (e.g. strip)
1463 1463 # 2) use phasecache somewhere (e.g. commit)
1464 1464 #
1465 1465 # then 2) will fail because the phasecache contains nodes that were
1466 1466 # removed. We can either remove phasecache from the filecache,
1467 1467 # causing it to reload next time it is accessed, or simply filter
1468 1468 # the removed nodes now and write the updated cache.
1469 1469 self._phasecache.filterunknown(self)
1470 1470 self._phasecache.write()
1471 1471
1472 1472 # update the 'served' branch cache to help read only server process
1473 1473 # Thanks to branchcache collaboration this is done from the nearest
1474 1474 # filtered subset and it is expected to be fast.
1475 1475 branchmap.updatecache(self.filtered('served'))
1476 1476
1477 1477 # Ensure the persistent tag cache is updated. Doing it now
1478 1478 # means that the tag cache only has to worry about destroyed
1479 1479 # heads immediately after a strip/rollback. That in turn
1480 1480 # guarantees that "cachetip == currenttip" (comparing both rev
1481 1481 # and node) always means no nodes have been added or destroyed.
1482 1482
1483 1483 # XXX this is suboptimal when qrefresh'ing: we strip the current
1484 1484 # head, refresh the tag cache, then immediately add a new head.
1485 1485 # But I think doing it this way is necessary for the "instant
1486 1486 # tag cache retrieval" case to work.
1487 1487 self.invalidate()
1488 1488
1489 1489 def walk(self, match, node=None):
1490 1490 '''
1491 1491 walk recursively through the directory tree or a given
1492 1492 changeset, finding all files matched by the match
1493 1493 function
1494 1494 '''
1495 1495 return self[node].walk(match)
1496 1496
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working:  # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

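    # Illustrative usage sketch (not part of Mercurial itself): how the
    # seven lists returned by status() are typically consumed.  With the
    # default arguments the working directory is compared against '.'.
    # The helper name '_example_status' is an assumption made for this
    # sketch.
    def _example_status(self):
        modified, added, removed, deleted, unknown, ignored, clean = \
            self.status(unknown=True)
        for prefix, files in zip('MARD?', (modified, added, removed,
                                           deleted, unknown)):
            for f in files:
                self.ui.write('%s %s\n' % (prefix, f))
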
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

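    # Illustrative usage sketch (not part of Mercurial itself): printing
    # the open heads of the 'default' branch, newest first.  The helper
    # name '_example_branchheads' is an assumption made for this sketch.
    def _example_branchheads(self):
        for node in self.branchheads('default'):
            self.ui.write('%s\n' % short(node))
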
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

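    # Illustrative note (not part of Mercurial itself): for each
    # (top, bottom) pair, between() samples the first-parent chain at
    # exponentially growing distances 1, 2, 4, 8, ... below top, which is
    # the sampling the legacy discovery protocol expects.  The helper name
    # '_example_between' is an assumption made for this sketch.
    def _example_between(self):
        top = self.changelog.tip()
        for sample in self.between([(top, nullid)]):
            self.ui.write('%d sampled nodes\n' % len(sample))
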
    def pull(self, remote, heads=None, force=False):
        return exchange.pull(self, remote, heads, force)

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

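    # Illustrative sketch (not part of Mercurial itself): how an extension
    # might wrap checkpush() to veto pushes, e.g. installed with
    # extensions.wrapfunction(localrepository, 'checkpush', wrapper).  The
    # 'example.blockpush' config knob is an assumption made for this
    # sketch.
    #
    #     def _example_checkpush(orig, repo, pushop):
    #         if repo.ui.configbool('example', 'blockpush'):
    #             raise util.Abort(_('pushing is disabled by configuration'))
    #         return orig(repo, pushop)
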
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote,
        outgoing)" functions, which are called before pushing changesets.
        """
        return util.hooks()

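    # Illustrative sketch (not part of Mercurial itself): a hook function
    # suitable for registration via
    # repo.prepushoutgoinghooks.add('example', _example_prepushhook).  The
    # helper name is an assumption made for this sketch.
    #
    #     def _example_prepushhook(repo, remote, outgoing):
    #         repo.ui.debug('about to push %d changesets\n'
    #                       % len(outgoing.missing))
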
    def push(self, remote, force=False, revs=None, newbranch=False):
        return exchange.push(self, remote, force, revs, newbranch)

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filters above 'served' are unlikely to be fetched
                    # from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

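    # Illustrative summary (not a normative specification) of the stream
    # format parsed by stream_in() above, reconstructed from its parsing
    # logic:
    #
    #   <status>\n                 -- 0: ok, 1: forbidden, 2: remote lock failed
    #   <file count> <total bytes>\n
    #   then, per file:
    #   <store path>\0<size>\n     -- followed by exactly <size> raw bytes
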
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

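    # Illustrative note (not part of Mercurial itself): the negotiation
    # above reduces to three outcomes -- a streaming clone via the legacy
    # 'stream' capability (revlogv1 only), a streaming clone via
    # 'streamreqs' when every remote requirement is locally supported, and
    # otherwise a regular pull.
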
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

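    # Illustrative usage sketch (not part of Mercurial itself): the
    # 'bookmarks' pushkey namespace maps bookmark names to hex nodes, so
    # listkeys() can enumerate them.  The helper name
    # '_example_listbookmarks' is an assumption made for this sketch.
    def _example_listbookmarks(self):
        for name, hexnode in sorted(self.listkeys('bookmarks').iteritems()):
            self.ui.write('%s %s\n' % (name, hexnode))
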
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

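    # Illustrative usage sketch (not part of Mercurial itself):
    # savecommitmessage() stores the text in .hg/last-message.txt and
    # returns its path relative to the current directory.  The helper name
    # '_example_savecommitmessage' and the message text are assumptions
    # made for this sketch.
    def _example_savecommitmessage(self):
        relpath = self.savecommitmessage('draft commit message\n')
        self.ui.write('message saved as %s\n' % relpath)
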
# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

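# Illustrative note (not part of Mercurial itself): undoname() only swaps
# the leading 'journal' for 'undo', so e.g. a transaction file named
# 'journal.dirstate' maps to 'undo.dirstate'.
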
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True