repoview: have unfilteredpropertycache using the underlying cache...
Pierre-Yves David
r19846:97896709 stable
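The hunk below is the whole change: unfilteredpropertycache.__get__ used to call the parent propertycache unconditionally on repo.unfiltered(), which meant a lookup through a filtered view computed and stored the value on whatever object the descriptor happened to see. It now checks whether it was invoked on the unfiltered repo itself; if not, it delegates to getattr on the unfiltered repo, so the value is computed and cached exactly once, on the underlying repository. Here is a minimal, self-contained sketch of that behaviour; the propertycache model and the toy repo/view classes are simplified stand-ins for illustration, not the real classes from util.py and repoview.py:

    class propertycache(object):
        # simplified model of util.propertycache (illustrative only)
        def __init__(self, func):
            self.func = func
            self.name = func.__name__

        def __get__(self, obj, type=None):
            value = self.func(obj)
            obj.__dict__[self.name] = value  # cache on the instance
            return value

    class unfilteredpropertycache(propertycache):
        # the behaviour introduced by this changeset
        def __get__(self, repo, type=None):
            unfi = repo.unfiltered()
            if unfi is repo:
                # already the unfiltered repo: compute and cache here
                return super(unfilteredpropertycache, self).__get__(unfi)
            # filtered view: route through the underlying repo's cache
            return getattr(unfi, self.name)

    class toyrepo(object):
        # hypothetical stand-in for localrepository
        def unfiltered(self):
            return self

        @unfilteredpropertycache
        def _expensive(self):
            print('computed once')
            return 42

    class toyview(toyrepo):
        # hypothetical stand-in for repoview; inherits the descriptor
        # the way filtered()'s proxycls does
        def __init__(self, base):
            self._base = base

        def unfiltered(self):
            return self._base

    r = toyrepo()
    v = toyview(r)
    v._expensive  # prints 'computed once'; value lands in r.__dict__
    v._expensive  # second lookup reuses the underlying cache
    r._expensive  # the unfiltered repo sees the same cached value

Because propertycache is a non-data descriptor (it defines no __set__), the value written into the unfiltered repo's __dict__ shadows the descriptor on later lookups, so both the view and the repo end up hitting the same cache entry.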
@@ -1,2448 +1,2451 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 unfi = repo.unfiltered()
43 if unfi is repo:
44 return super(unfilteredpropertycache, self).__get__(unfi)
45 return getattr(unfi, self.name)
43 46
44 47 class filteredpropertycache(propertycache):
45 48 """propertycache that must take filtering in account"""
46 49
47 50 def cachevalue(self, obj, value):
48 51 object.__setattr__(obj, self.name, value)
49 52
50 53
51 54 def hasunfilteredcache(repo, name):
52 55 """check if a repo has an unfilteredpropertycache value for <name>"""
53 56 return name in vars(repo.unfiltered())
54 57
55 58 def unfilteredmethod(orig):
56 59 """decorate method that always need to be run on unfiltered version"""
57 60 def wrapper(repo, *args, **kwargs):
58 61 return orig(repo.unfiltered(), *args, **kwargs)
59 62 return wrapper
60 63
61 64 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 65 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 66
64 67 class localpeer(peer.peerrepository):
65 68 '''peer for a local repo; reflects only the most recent API'''
66 69
67 70 def __init__(self, repo, caps=MODERNCAPS):
68 71 peer.peerrepository.__init__(self)
69 72 self._repo = repo.filtered('served')
70 73 self.ui = repo.ui
71 74 self._caps = repo._restrictcapabilities(caps)
72 75 self.requirements = repo.requirements
73 76 self.supportedformats = repo.supportedformats
74 77
75 78 def close(self):
76 79 self._repo.close()
77 80
78 81 def _capabilities(self):
79 82 return self._caps
80 83
81 84 def local(self):
82 85 return self._repo
83 86
84 87 def canpush(self):
85 88 return True
86 89
87 90 def url(self):
88 91 return self._repo.url()
89 92
90 93 def lookup(self, key):
91 94 return self._repo.lookup(key)
92 95
93 96 def branchmap(self):
94 97 return self._repo.branchmap()
95 98
96 99 def heads(self):
97 100 return self._repo.heads()
98 101
99 102 def known(self, nodes):
100 103 return self._repo.known(nodes)
101 104
102 105 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
103 106 return self._repo.getbundle(source, heads=heads, common=common,
104 107 bundlecaps=None)
105 108
106 109 # TODO We might want to move the next two calls into legacypeer and add
107 110 # unbundle instead.
108 111
109 112 def lock(self):
110 113 return self._repo.lock()
111 114
112 115 def addchangegroup(self, cg, source, url):
113 116 return self._repo.addchangegroup(cg, source, url)
114 117
115 118 def pushkey(self, namespace, key, old, new):
116 119 return self._repo.pushkey(namespace, key, old, new)
117 120
118 121 def listkeys(self, namespace):
119 122 return self._repo.listkeys(namespace)
120 123
121 124 def debugwireargs(self, one, two, three=None, four=None, five=None):
122 125 '''used to test argument passing over the wire'''
123 126 return "%s %s %s %s %s" % (one, two, three, four, five)
124 127
125 128 class locallegacypeer(localpeer):
126 129 '''peer extension which implements legacy methods too; used for tests with
127 130 restricted capabilities'''
128 131
129 132 def __init__(self, repo):
130 133 localpeer.__init__(self, repo, caps=LEGACYCAPS)
131 134
132 135 def branches(self, nodes):
133 136 return self._repo.branches(nodes)
134 137
135 138 def between(self, pairs):
136 139 return self._repo.between(pairs)
137 140
138 141 def changegroup(self, basenodes, source):
139 142 return self._repo.changegroup(basenodes, source)
140 143
141 144 def changegroupsubset(self, bases, heads, source):
142 145 return self._repo.changegroupsubset(bases, heads, source)
143 146
144 147 class localrepository(object):
145 148
146 149 supportedformats = set(('revlogv1', 'generaldelta'))
147 150 supported = supportedformats | set(('store', 'fncache', 'shared',
148 151 'dotencode'))
149 152 openerreqs = set(('revlogv1', 'generaldelta'))
150 153 requirements = ['revlogv1']
151 154 filtername = None
152 155
153 156 def _baserequirements(self, create):
154 157 return self.requirements[:]
155 158
156 159 def __init__(self, baseui, path=None, create=False):
157 160 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
158 161 self.wopener = self.wvfs
159 162 self.root = self.wvfs.base
160 163 self.path = self.wvfs.join(".hg")
161 164 self.origroot = path
162 165 self.auditor = scmutil.pathauditor(self.root, self._checknested)
163 166 self.vfs = scmutil.vfs(self.path)
164 167 self.opener = self.vfs
165 168 self.baseui = baseui
166 169 self.ui = baseui.copy()
167 170 # A list of callbacks to shape the phase if no data were found.
168 171 # Callbacks are in the form: func(repo, roots) --> processed roots.
169 172 # This list is to be filled by extensions during repo setup.
170 173 self._phasedefaults = []
171 174 try:
172 175 self.ui.readconfig(self.join("hgrc"), self.root)
173 176 extensions.loadall(self.ui)
174 177 except IOError:
175 178 pass
176 179
177 180 if not self.vfs.isdir():
178 181 if create:
179 182 if not self.wvfs.exists():
180 183 self.wvfs.makedirs()
181 184 self.vfs.makedir(notindexed=True)
182 185 requirements = self._baserequirements(create)
183 186 if self.ui.configbool('format', 'usestore', True):
184 187 self.vfs.mkdir("store")
185 188 requirements.append("store")
186 189 if self.ui.configbool('format', 'usefncache', True):
187 190 requirements.append("fncache")
188 191 if self.ui.configbool('format', 'dotencode', True):
189 192 requirements.append('dotencode')
190 193 # create an invalid changelog
191 194 self.vfs.append(
192 195 "00changelog.i",
193 196 '\0\0\0\2' # represents revlogv2
194 197 ' dummy changelog to prevent using the old repo layout'
195 198 )
196 199 if self.ui.configbool('format', 'generaldelta', False):
197 200 requirements.append("generaldelta")
198 201 requirements = set(requirements)
199 202 else:
200 203 raise error.RepoError(_("repository %s not found") % path)
201 204 elif create:
202 205 raise error.RepoError(_("repository %s already exists") % path)
203 206 else:
204 207 try:
205 208 requirements = scmutil.readrequires(self.vfs, self.supported)
206 209 except IOError, inst:
207 210 if inst.errno != errno.ENOENT:
208 211 raise
209 212 requirements = set()
210 213
211 214 self.sharedpath = self.path
212 215 try:
213 216 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
214 217 realpath=True)
215 218 s = vfs.base
216 219 if not vfs.exists():
217 220 raise error.RepoError(
218 221 _('.hg/sharedpath points to nonexistent directory %s') % s)
219 222 self.sharedpath = s
220 223 except IOError, inst:
221 224 if inst.errno != errno.ENOENT:
222 225 raise
223 226
224 227 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
225 228 self.spath = self.store.path
226 229 self.svfs = self.store.vfs
227 230 self.sopener = self.svfs
228 231 self.sjoin = self.store.join
229 232 self.vfs.createmode = self.store.createmode
230 233 self._applyrequirements(requirements)
231 234 if create:
232 235 self._writerequirements()
233 236
234 237
235 238 self._branchcaches = {}
236 239 self.filterpats = {}
237 240 self._datafilters = {}
238 241 self._transref = self._lockref = self._wlockref = None
239 242
240 243 # A cache for various files under .hg/ that tracks file changes,
241 244 # (used by the filecache decorator)
242 245 #
243 246 # Maps a property name to its util.filecacheentry
244 247 self._filecache = {}
245 248
246 249 # hold sets of revision to be filtered
247 250 # should be cleared when something might have changed the filter value:
248 251 # - new changesets,
249 252 # - phase change,
250 253 # - new obsolescence marker,
251 254 # - working directory parent change,
252 255 # - bookmark changes
253 256 self.filteredrevcache = {}
254 257
255 258 def close(self):
256 259 pass
257 260
258 261 def _restrictcapabilities(self, caps):
259 262 return caps
260 263
261 264 def _applyrequirements(self, requirements):
262 265 self.requirements = requirements
263 266 self.sopener.options = dict((r, 1) for r in requirements
264 267 if r in self.openerreqs)
265 268
266 269 def _writerequirements(self):
267 270 reqfile = self.opener("requires", "w")
268 271 for r in sorted(self.requirements):
269 272 reqfile.write("%s\n" % r)
270 273 reqfile.close()
271 274
272 275 def _checknested(self, path):
273 276 """Determine if path is a legal nested repository."""
274 277 if not path.startswith(self.root):
275 278 return False
276 279 subpath = path[len(self.root) + 1:]
277 280 normsubpath = util.pconvert(subpath)
278 281
279 282 # XXX: Checking against the current working copy is wrong in
280 283 # the sense that it can reject things like
281 284 #
282 285 # $ hg cat -r 10 sub/x.txt
283 286 #
284 287 # if sub/ is no longer a subrepository in the working copy
285 288 # parent revision.
286 289 #
287 290 # However, it can of course also allow things that would have
288 291 # been rejected before, such as the above cat command if sub/
289 292 # is a subrepository now, but was a normal directory before.
290 293 # The old path auditor would have rejected by mistake since it
291 294 # panics when it sees sub/.hg/.
292 295 #
293 296 # All in all, checking against the working copy seems sensible
294 297 # since we want to prevent access to nested repositories on
295 298 # the filesystem *now*.
296 299 ctx = self[None]
297 300 parts = util.splitpath(subpath)
298 301 while parts:
299 302 prefix = '/'.join(parts)
300 303 if prefix in ctx.substate:
301 304 if prefix == normsubpath:
302 305 return True
303 306 else:
304 307 sub = ctx.sub(prefix)
305 308 return sub.checknested(subpath[len(prefix) + 1:])
306 309 else:
307 310 parts.pop()
308 311 return False
309 312
310 313 def peer(self):
311 314 return localpeer(self) # not cached to avoid reference cycle
312 315
313 316 def unfiltered(self):
314 317 """Return unfiltered version of the repository
315 318
316 319 Intended to be overwritten by filtered repo."""
317 320 return self
318 321
319 322 def filtered(self, name):
320 323 """Return a filtered version of a repository"""
321 324 # build a new class with the mixin and the current class
322 325 # (possibly subclass of the repo)
323 326 class proxycls(repoview.repoview, self.unfiltered().__class__):
324 327 pass
325 328 return proxycls(self, name)
326 329
327 330 @repofilecache('bookmarks')
328 331 def _bookmarks(self):
329 332 return bookmarks.bmstore(self)
330 333
331 334 @repofilecache('bookmarks.current')
332 335 def _bookmarkcurrent(self):
333 336 return bookmarks.readcurrent(self)
334 337
335 338 def bookmarkheads(self, bookmark):
336 339 name = bookmark.split('@', 1)[0]
337 340 heads = []
338 341 for mark, n in self._bookmarks.iteritems():
339 342 if mark.split('@', 1)[0] == name:
340 343 heads.append(n)
341 344 return heads
342 345
343 346 @storecache('phaseroots')
344 347 def _phasecache(self):
345 348 return phases.phasecache(self, self._phasedefaults)
346 349
347 350 @storecache('obsstore')
348 351 def obsstore(self):
349 352 store = obsolete.obsstore(self.sopener)
350 353 if store and not obsolete._enabled:
351 354 # message is rare enough to not be translated
352 355 msg = 'obsolete feature not enabled but %i markers found!\n'
353 356 self.ui.warn(msg % len(list(store)))
354 357 return store
355 358
356 359 @storecache('00changelog.i')
357 360 def changelog(self):
358 361 c = changelog.changelog(self.sopener)
359 362 if 'HG_PENDING' in os.environ:
360 363 p = os.environ['HG_PENDING']
361 364 if p.startswith(self.root):
362 365 c.readpending('00changelog.i.a')
363 366 return c
364 367
365 368 @storecache('00manifest.i')
366 369 def manifest(self):
367 370 return manifest.manifest(self.sopener)
368 371
369 372 @repofilecache('dirstate')
370 373 def dirstate(self):
371 374 warned = [0]
372 375 def validate(node):
373 376 try:
374 377 self.changelog.rev(node)
375 378 return node
376 379 except error.LookupError:
377 380 if not warned[0]:
378 381 warned[0] = True
379 382 self.ui.warn(_("warning: ignoring unknown"
380 383 " working parent %s!\n") % short(node))
381 384 return nullid
382 385
383 386 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
384 387
385 388 def __getitem__(self, changeid):
386 389 if changeid is None:
387 390 return context.workingctx(self)
388 391 return context.changectx(self, changeid)
389 392
390 393 def __contains__(self, changeid):
391 394 try:
392 395 return bool(self.lookup(changeid))
393 396 except error.RepoLookupError:
394 397 return False
395 398
396 399 def __nonzero__(self):
397 400 return True
398 401
399 402 def __len__(self):
400 403 return len(self.changelog)
401 404
402 405 def __iter__(self):
403 406 return iter(self.changelog)
404 407
405 408 def revs(self, expr, *args):
406 409 '''Return a list of revisions matching the given revset'''
407 410 expr = revset.formatspec(expr, *args)
408 411 m = revset.match(None, expr)
409 412 return [r for r in m(self, list(self))]
410 413
411 414 def set(self, expr, *args):
412 415 '''
413 416 Yield a context for each matching revision, after doing arg
414 417 replacement via revset.formatspec
415 418 '''
416 419 for r in self.revs(expr, *args):
417 420 yield self[r]
418 421
419 422 def url(self):
420 423 return 'file:' + self.root
421 424
422 425 def hook(self, name, throw=False, **args):
423 426 return hook.hook(self.ui, self, name, throw, **args)
424 427
425 428 @unfilteredmethod
426 429 def _tag(self, names, node, message, local, user, date, extra={}):
427 430 if isinstance(names, str):
428 431 names = (names,)
429 432
430 433 branches = self.branchmap()
431 434 for name in names:
432 435 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 436 local=local)
434 437 if name in branches:
435 438 self.ui.warn(_("warning: tag %s conflicts with existing"
436 439 " branch name\n") % name)
437 440
438 441 def writetags(fp, names, munge, prevtags):
439 442 fp.seek(0, 2)
440 443 if prevtags and prevtags[-1] != '\n':
441 444 fp.write('\n')
442 445 for name in names:
443 446 m = munge and munge(name) or name
444 447 if (self._tagscache.tagtypes and
445 448 name in self._tagscache.tagtypes):
446 449 old = self.tags().get(name, nullid)
447 450 fp.write('%s %s\n' % (hex(old), m))
448 451 fp.write('%s %s\n' % (hex(node), m))
449 452 fp.close()
450 453
451 454 prevtags = ''
452 455 if local:
453 456 try:
454 457 fp = self.opener('localtags', 'r+')
455 458 except IOError:
456 459 fp = self.opener('localtags', 'a')
457 460 else:
458 461 prevtags = fp.read()
459 462
460 463 # local tags are stored in the current charset
461 464 writetags(fp, names, None, prevtags)
462 465 for name in names:
463 466 self.hook('tag', node=hex(node), tag=name, local=local)
464 467 return
465 468
466 469 try:
467 470 fp = self.wfile('.hgtags', 'rb+')
468 471 except IOError, e:
469 472 if e.errno != errno.ENOENT:
470 473 raise
471 474 fp = self.wfile('.hgtags', 'ab')
472 475 else:
473 476 prevtags = fp.read()
474 477
475 478 # committed tags are stored in UTF-8
476 479 writetags(fp, names, encoding.fromlocal, prevtags)
477 480
478 481 fp.close()
479 482
480 483 self.invalidatecaches()
481 484
482 485 if '.hgtags' not in self.dirstate:
483 486 self[None].add(['.hgtags'])
484 487
485 488 m = matchmod.exact(self.root, '', ['.hgtags'])
486 489 tagnode = self.commit(message, user, date, extra=extra, match=m)
487 490
488 491 for name in names:
489 492 self.hook('tag', node=hex(node), tag=name, local=local)
490 493
491 494 return tagnode
492 495
493 496 def tag(self, names, node, message, local, user, date):
494 497 '''tag a revision with one or more symbolic names.
495 498
496 499 names is a list of strings or, when adding a single tag, names may be a
497 500 string.
498 501
499 502 if local is True, the tags are stored in a per-repository file.
500 503 otherwise, they are stored in the .hgtags file, and a new
501 504 changeset is committed with the change.
502 505
503 506 keyword arguments:
504 507
505 508 local: whether to store tags in non-version-controlled file
506 509 (default False)
507 510
508 511 message: commit message to use if committing
509 512
510 513 user: name of user to use if committing
511 514
512 515 date: date tuple to use if committing'''
513 516
514 517 if not local:
515 518 for x in self.status()[:5]:
516 519 if '.hgtags' in x:
517 520 raise util.Abort(_('working copy of .hgtags is changed '
518 521 '(please commit .hgtags manually)'))
519 522
520 523 self.tags() # instantiate the cache
521 524 self._tag(names, node, message, local, user, date)
522 525
523 526 @filteredpropertycache
524 527 def _tagscache(self):
525 528 '''Returns a tagscache object that contains various tags related
526 529 caches.'''
527 530
528 531 # This simplifies its cache management by having one decorated
529 532 # function (this one) and the rest simply fetch things from it.
530 533 class tagscache(object):
531 534 def __init__(self):
532 535 # These two define the set of tags for this repository. tags
533 536 # maps tag name to node; tagtypes maps tag name to 'global' or
534 537 # 'local'. (Global tags are defined by .hgtags across all
535 538 # heads, and local tags are defined in .hg/localtags.)
536 539 # They constitute the in-memory cache of tags.
537 540 self.tags = self.tagtypes = None
538 541
539 542 self.nodetagscache = self.tagslist = None
540 543
541 544 cache = tagscache()
542 545 cache.tags, cache.tagtypes = self._findtags()
543 546
544 547 return cache
545 548
546 549 def tags(self):
547 550 '''return a mapping of tag to node'''
548 551 t = {}
549 552 if self.changelog.filteredrevs:
550 553 tags, tt = self._findtags()
551 554 else:
552 555 tags = self._tagscache.tags
553 556 for k, v in tags.iteritems():
554 557 try:
555 558 # ignore tags to unknown nodes
556 559 self.changelog.rev(v)
557 560 t[k] = v
558 561 except (error.LookupError, ValueError):
559 562 pass
560 563 return t
561 564
562 565 def _findtags(self):
563 566 '''Do the hard work of finding tags. Return a pair of dicts
564 567 (tags, tagtypes) where tags maps tag name to node, and tagtypes
565 568 maps tag name to a string like \'global\' or \'local\'.
566 569 Subclasses or extensions are free to add their own tags, but
567 570 should be aware that the returned dicts will be retained for the
568 571 duration of the localrepo object.'''
569 572
570 573 # XXX what tagtype should subclasses/extensions use? Currently
571 574 # mq and bookmarks add tags, but do not set the tagtype at all.
572 575 # Should each extension invent its own tag type? Should there
573 576 # be one tagtype for all such "virtual" tags? Or is the status
574 577 # quo fine?
575 578
576 579 alltags = {} # map tag name to (node, hist)
577 580 tagtypes = {}
578 581
579 582 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
580 583 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
581 584
582 585 # Build the return dicts. Have to re-encode tag names because
583 586 # the tags module always uses UTF-8 (in order not to lose info
584 587 # writing to the cache), but the rest of Mercurial wants them in
585 588 # local encoding.
586 589 tags = {}
587 590 for (name, (node, hist)) in alltags.iteritems():
588 591 if node != nullid:
589 592 tags[encoding.tolocal(name)] = node
590 593 tags['tip'] = self.changelog.tip()
591 594 tagtypes = dict([(encoding.tolocal(name), value)
592 595 for (name, value) in tagtypes.iteritems()])
593 596 return (tags, tagtypes)
594 597
595 598 def tagtype(self, tagname):
596 599 '''
597 600 return the type of the given tag. result can be:
598 601
599 602 'local' : a local tag
600 603 'global' : a global tag
601 604 None : tag does not exist
602 605 '''
603 606
604 607 return self._tagscache.tagtypes.get(tagname)
605 608
606 609 def tagslist(self):
607 610 '''return a list of tags ordered by revision'''
608 611 if not self._tagscache.tagslist:
609 612 l = []
610 613 for t, n in self.tags().iteritems():
611 614 r = self.changelog.rev(n)
612 615 l.append((r, t, n))
613 616 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
614 617
615 618 return self._tagscache.tagslist
616 619
617 620 def nodetags(self, node):
618 621 '''return the tags associated with a node'''
619 622 if not self._tagscache.nodetagscache:
620 623 nodetagscache = {}
621 624 for t, n in self._tagscache.tags.iteritems():
622 625 nodetagscache.setdefault(n, []).append(t)
623 626 for tags in nodetagscache.itervalues():
624 627 tags.sort()
625 628 self._tagscache.nodetagscache = nodetagscache
626 629 return self._tagscache.nodetagscache.get(node, [])
627 630
628 631 def nodebookmarks(self, node):
629 632 marks = []
630 633 for bookmark, n in self._bookmarks.iteritems():
631 634 if n == node:
632 635 marks.append(bookmark)
633 636 return sorted(marks)
634 637
635 638 def branchmap(self):
636 639 '''returns a dictionary {branch: [branchheads]}'''
637 640 branchmap.updatecache(self)
638 641 return self._branchcaches[self.filtername]
639 642
640 643
641 644 def _branchtip(self, heads):
642 645 '''return the tipmost branch head in heads'''
643 646 tip = heads[-1]
644 647 for h in reversed(heads):
645 648 if not self[h].closesbranch():
646 649 tip = h
647 650 break
648 651 return tip
649 652
650 653 def branchtip(self, branch):
651 654 '''return the tip node for a given branch'''
652 655 if branch not in self.branchmap():
653 656 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
654 657 return self._branchtip(self.branchmap()[branch])
655 658
656 659 def branchtags(self):
657 660 '''return a dict where branch names map to the tipmost head of
658 661 the branch, open heads come before closed'''
659 662 bt = {}
660 663 for bn, heads in self.branchmap().iteritems():
661 664 bt[bn] = self._branchtip(heads)
662 665 return bt
663 666
664 667 def lookup(self, key):
665 668 return self[key].node()
666 669
667 670 def lookupbranch(self, key, remote=None):
668 671 repo = remote or self
669 672 if key in repo.branchmap():
670 673 return key
671 674
672 675 repo = (remote and remote.local()) and remote or self
673 676 return repo[key].branch()
674 677
675 678 def known(self, nodes):
676 679 nm = self.changelog.nodemap
677 680 pc = self._phasecache
678 681 result = []
679 682 for n in nodes:
680 683 r = nm.get(n)
681 684 resp = not (r is None or pc.phase(self, r) >= phases.secret)
682 685 result.append(resp)
683 686 return result
684 687
685 688 def local(self):
686 689 return self
687 690
688 691 def cancopy(self):
689 692 return self.local() # so statichttprepo's override of local() works
690 693
691 694 def join(self, f):
692 695 return os.path.join(self.path, f)
693 696
694 697 def wjoin(self, f):
695 698 return os.path.join(self.root, f)
696 699
697 700 def file(self, f):
698 701 if f[0] == '/':
699 702 f = f[1:]
700 703 return filelog.filelog(self.sopener, f)
701 704
702 705 def changectx(self, changeid):
703 706 return self[changeid]
704 707
705 708 def parents(self, changeid=None):
706 709 '''get list of changectxs for parents of changeid'''
707 710 return self[changeid].parents()
708 711
709 712 def setparents(self, p1, p2=nullid):
710 713 copies = self.dirstate.setparents(p1, p2)
711 714 pctx = self[p1]
712 715 if copies:
713 716 # Adjust copy records, the dirstate cannot do it, it
714 717 # requires access to parents manifests. Preserve them
715 718 # only for entries added to first parent.
716 719 for f in copies:
717 720 if f not in pctx and copies[f] in pctx:
718 721 self.dirstate.copy(copies[f], f)
719 722 if p2 == nullid:
720 723 for f, s in sorted(self.dirstate.copies().items()):
721 724 if f not in pctx and s not in pctx:
722 725 self.dirstate.copy(None, f)
723 726
724 727 def filectx(self, path, changeid=None, fileid=None):
725 728 """changeid can be a changeset revision, node, or tag.
726 729 fileid can be a file revision or node."""
727 730 return context.filectx(self, path, changeid, fileid)
728 731
729 732 def getcwd(self):
730 733 return self.dirstate.getcwd()
731 734
732 735 def pathto(self, f, cwd=None):
733 736 return self.dirstate.pathto(f, cwd)
734 737
735 738 def wfile(self, f, mode='r'):
736 739 return self.wopener(f, mode)
737 740
738 741 def _link(self, f):
739 742 return self.wvfs.islink(f)
740 743
741 744 def _loadfilter(self, filter):
742 745 if filter not in self.filterpats:
743 746 l = []
744 747 for pat, cmd in self.ui.configitems(filter):
745 748 if cmd == '!':
746 749 continue
747 750 mf = matchmod.match(self.root, '', [pat])
748 751 fn = None
749 752 params = cmd
750 753 for name, filterfn in self._datafilters.iteritems():
751 754 if cmd.startswith(name):
752 755 fn = filterfn
753 756 params = cmd[len(name):].lstrip()
754 757 break
755 758 if not fn:
756 759 fn = lambda s, c, **kwargs: util.filter(s, c)
757 760 # Wrap old filters not supporting keyword arguments
758 761 if not inspect.getargspec(fn)[2]:
759 762 oldfn = fn
760 763 fn = lambda s, c, **kwargs: oldfn(s, c)
761 764 l.append((mf, fn, params))
762 765 self.filterpats[filter] = l
763 766 return self.filterpats[filter]
764 767
765 768 def _filter(self, filterpats, filename, data):
766 769 for mf, fn, cmd in filterpats:
767 770 if mf(filename):
768 771 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
769 772 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
770 773 break
771 774
772 775 return data
773 776
774 777 @unfilteredpropertycache
775 778 def _encodefilterpats(self):
776 779 return self._loadfilter('encode')
777 780
778 781 @unfilteredpropertycache
779 782 def _decodefilterpats(self):
780 783 return self._loadfilter('decode')
781 784
782 785 def adddatafilter(self, name, filter):
783 786 self._datafilters[name] = filter
784 787
785 788 def wread(self, filename):
786 789 if self._link(filename):
787 790 data = self.wvfs.readlink(filename)
788 791 else:
789 792 data = self.wopener.read(filename)
790 793 return self._filter(self._encodefilterpats, filename, data)
791 794
792 795 def wwrite(self, filename, data, flags):
793 796 data = self._filter(self._decodefilterpats, filename, data)
794 797 if 'l' in flags:
795 798 self.wopener.symlink(data, filename)
796 799 else:
797 800 self.wopener.write(filename, data)
798 801 if 'x' in flags:
799 802 self.wvfs.setflags(filename, False, True)
800 803
801 804 def wwritedata(self, filename, data):
802 805 return self._filter(self._decodefilterpats, filename, data)
803 806
804 807 def transaction(self, desc):
805 808 tr = self._transref and self._transref() or None
806 809 if tr and tr.running():
807 810 return tr.nest()
808 811
809 812 # abort here if the journal already exists
810 813 if self.svfs.exists("journal"):
811 814 raise error.RepoError(
812 815 _("abandoned transaction found - run hg recover"))
813 816
814 817 self._writejournal(desc)
815 818 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
816 819
817 820 tr = transaction.transaction(self.ui.warn, self.sopener,
818 821 self.sjoin("journal"),
819 822 aftertrans(renames),
820 823 self.store.createmode)
821 824 self._transref = weakref.ref(tr)
822 825 return tr
823 826
824 827 def _journalfiles(self):
825 828 return ((self.svfs, 'journal'),
826 829 (self.vfs, 'journal.dirstate'),
827 830 (self.vfs, 'journal.branch'),
828 831 (self.vfs, 'journal.desc'),
829 832 (self.vfs, 'journal.bookmarks'),
830 833 (self.svfs, 'journal.phaseroots'))
831 834
832 835 def undofiles(self):
833 836 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
834 837
835 838 def _writejournal(self, desc):
836 839 self.opener.write("journal.dirstate",
837 840 self.opener.tryread("dirstate"))
838 841 self.opener.write("journal.branch",
839 842 encoding.fromlocal(self.dirstate.branch()))
840 843 self.opener.write("journal.desc",
841 844 "%d\n%s\n" % (len(self), desc))
842 845 self.opener.write("journal.bookmarks",
843 846 self.opener.tryread("bookmarks"))
844 847 self.sopener.write("journal.phaseroots",
845 848 self.sopener.tryread("phaseroots"))
846 849
847 850 def recover(self):
848 851 lock = self.lock()
849 852 try:
850 853 if self.svfs.exists("journal"):
851 854 self.ui.status(_("rolling back interrupted transaction\n"))
852 855 transaction.rollback(self.sopener, self.sjoin("journal"),
853 856 self.ui.warn)
854 857 self.invalidate()
855 858 return True
856 859 else:
857 860 self.ui.warn(_("no interrupted transaction available\n"))
858 861 return False
859 862 finally:
860 863 lock.release()
861 864
862 865 def rollback(self, dryrun=False, force=False):
863 866 wlock = lock = None
864 867 try:
865 868 wlock = self.wlock()
866 869 lock = self.lock()
867 870 if self.svfs.exists("undo"):
868 871 return self._rollback(dryrun, force)
869 872 else:
870 873 self.ui.warn(_("no rollback information available\n"))
871 874 return 1
872 875 finally:
873 876 release(lock, wlock)
874 877
875 878 @unfilteredmethod # Until we get smarter cache management
876 879 def _rollback(self, dryrun, force):
877 880 ui = self.ui
878 881 try:
879 882 args = self.opener.read('undo.desc').splitlines()
880 883 (oldlen, desc, detail) = (int(args[0]), args[1], None)
881 884 if len(args) >= 3:
882 885 detail = args[2]
883 886 oldtip = oldlen - 1
884 887
885 888 if detail and ui.verbose:
886 889 msg = (_('repository tip rolled back to revision %s'
887 890 ' (undo %s: %s)\n')
888 891 % (oldtip, desc, detail))
889 892 else:
890 893 msg = (_('repository tip rolled back to revision %s'
891 894 ' (undo %s)\n')
892 895 % (oldtip, desc))
893 896 except IOError:
894 897 msg = _('rolling back unknown transaction\n')
895 898 desc = None
896 899
897 900 if not force and self['.'] != self['tip'] and desc == 'commit':
898 901 raise util.Abort(
899 902 _('rollback of last commit while not checked out '
900 903 'may lose data'), hint=_('use -f to force'))
901 904
902 905 ui.status(msg)
903 906 if dryrun:
904 907 return 0
905 908
906 909 parents = self.dirstate.parents()
907 910 self.destroying()
908 911 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
909 912 if self.vfs.exists('undo.bookmarks'):
910 913 self.vfs.rename('undo.bookmarks', 'bookmarks')
911 914 if self.svfs.exists('undo.phaseroots'):
912 915 self.svfs.rename('undo.phaseroots', 'phaseroots')
913 916 self.invalidate()
914 917
915 918 parentgone = (parents[0] not in self.changelog.nodemap or
916 919 parents[1] not in self.changelog.nodemap)
917 920 if parentgone:
918 921 self.vfs.rename('undo.dirstate', 'dirstate')
919 922 try:
920 923 branch = self.opener.read('undo.branch')
921 924 self.dirstate.setbranch(encoding.tolocal(branch))
922 925 except IOError:
923 926 ui.warn(_('named branch could not be reset: '
924 927 'current branch is still \'%s\'\n')
925 928 % self.dirstate.branch())
926 929
927 930 self.dirstate.invalidate()
928 931 parents = tuple([p.rev() for p in self.parents()])
929 932 if len(parents) > 1:
930 933 ui.status(_('working directory now based on '
931 934 'revisions %d and %d\n') % parents)
932 935 else:
933 936 ui.status(_('working directory now based on '
934 937 'revision %d\n') % parents)
935 938 # TODO: if we know which new heads may result from this rollback, pass
936 939 # them to destroy(), which will prevent the branchhead cache from being
937 940 # invalidated.
938 941 self.destroyed()
939 942 return 0
940 943
941 944 def invalidatecaches(self):
942 945
943 946 if '_tagscache' in vars(self):
944 947 # can't use delattr on proxy
945 948 del self.__dict__['_tagscache']
946 949
947 950 self.unfiltered()._branchcaches.clear()
948 951 self.invalidatevolatilesets()
949 952
950 953 def invalidatevolatilesets(self):
951 954 self.filteredrevcache.clear()
952 955 obsolete.clearobscaches(self)
953 956
954 957 def invalidatedirstate(self):
955 958 '''Invalidates the dirstate, causing the next call to dirstate
956 959 to check if it was modified since the last time it was read,
957 960 rereading it if it has.
958 961
959 962 This is different from dirstate.invalidate() in that it doesn't
960 963 always reread the dirstate. Use dirstate.invalidate() if you want to
961 964 explicitly read the dirstate again (i.e. restoring it to a previous
962 965 known good state).'''
963 966 if hasunfilteredcache(self, 'dirstate'):
964 967 for k in self.dirstate._filecache:
965 968 try:
966 969 delattr(self.dirstate, k)
967 970 except AttributeError:
968 971 pass
969 972 delattr(self.unfiltered(), 'dirstate')
970 973
971 974 def invalidate(self):
972 975 unfiltered = self.unfiltered() # all file caches are stored unfiltered
973 976 for k in self._filecache:
974 977 # dirstate is invalidated separately in invalidatedirstate()
975 978 if k == 'dirstate':
976 979 continue
977 980
978 981 try:
979 982 delattr(unfiltered, k)
980 983 except AttributeError:
981 984 pass
982 985 self.invalidatecaches()
983 986
984 987 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
985 988 try:
986 989 l = lock.lock(lockname, 0, releasefn, desc=desc)
987 990 except error.LockHeld, inst:
988 991 if not wait:
989 992 raise
990 993 self.ui.warn(_("waiting for lock on %s held by %r\n") %
991 994 (desc, inst.locker))
992 995 # default to 600 seconds timeout
993 996 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
994 997 releasefn, desc=desc)
995 998 if acquirefn:
996 999 acquirefn()
997 1000 return l
998 1001
999 1002 def _afterlock(self, callback):
1000 1003 """add a callback to the current repository lock.
1001 1004
1002 1005 The callback will be executed on lock release."""
1003 1006 l = self._lockref and self._lockref()
1004 1007 if l:
1005 1008 l.postrelease.append(callback)
1006 1009 else:
1007 1010 callback()
1008 1011
1009 1012 def lock(self, wait=True):
1010 1013 '''Lock the repository store (.hg/store) and return a weak reference
1011 1014 to the lock. Use this before modifying the store (e.g. committing or
1012 1015 stripping). If you are opening a transaction, get a lock as well.'''
1013 1016 l = self._lockref and self._lockref()
1014 1017 if l is not None and l.held:
1015 1018 l.lock()
1016 1019 return l
1017 1020
1018 1021 def unlock():
1019 1022 self.store.write()
1020 1023 if hasunfilteredcache(self, '_phasecache'):
1021 1024 self._phasecache.write()
1022 1025 for k, ce in self._filecache.items():
1023 1026 if k == 'dirstate' or k not in self.__dict__:
1024 1027 continue
1025 1028 ce.refresh()
1026 1029
1027 1030 l = self._lock(self.sjoin("lock"), wait, unlock,
1028 1031 self.invalidate, _('repository %s') % self.origroot)
1029 1032 self._lockref = weakref.ref(l)
1030 1033 return l
1031 1034
1032 1035 def wlock(self, wait=True):
1033 1036 '''Lock the non-store parts of the repository (everything under
1034 1037 .hg except .hg/store) and return a weak reference to the lock.
1035 1038 Use this before modifying files in .hg.'''
1036 1039 l = self._wlockref and self._wlockref()
1037 1040 if l is not None and l.held:
1038 1041 l.lock()
1039 1042 return l
1040 1043
1041 1044 def unlock():
1042 1045 self.dirstate.write()
1043 1046 self._filecache['dirstate'].refresh()
1044 1047
1045 1048 l = self._lock(self.join("wlock"), wait, unlock,
1046 1049 self.invalidatedirstate, _('working directory of %s') %
1047 1050 self.origroot)
1048 1051 self._wlockref = weakref.ref(l)
1049 1052 return l
1050 1053
1051 1054 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1052 1055 """
1053 1056 commit an individual file as part of a larger transaction
1054 1057 """
1055 1058
1056 1059 fname = fctx.path()
1057 1060 text = fctx.data()
1058 1061 flog = self.file(fname)
1059 1062 fparent1 = manifest1.get(fname, nullid)
1060 1063 fparent2 = fparent2o = manifest2.get(fname, nullid)
1061 1064
1062 1065 meta = {}
1063 1066 copy = fctx.renamed()
1064 1067 if copy and copy[0] != fname:
1065 1068 # Mark the new revision of this file as a copy of another
1066 1069 # file. This copy data will effectively act as a parent
1067 1070 # of this new revision. If this is a merge, the first
1068 1071 # parent will be the nullid (meaning "look up the copy data")
1069 1072 # and the second one will be the other parent. For example:
1070 1073 #
1071 1074 # 0 --- 1 --- 3 rev1 changes file foo
1072 1075 # \ / rev2 renames foo to bar and changes it
1073 1076 # \- 2 -/ rev3 should have bar with all changes and
1074 1077 # should record that bar descends from
1075 1078 # bar in rev2 and foo in rev1
1076 1079 #
1077 1080 # this allows this merge to succeed:
1078 1081 #
1079 1082 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1080 1083 # \ / merging rev3 and rev4 should use bar@rev2
1081 1084 # \- 2 --- 4 as the merge base
1082 1085 #
1083 1086
1084 1087 cfname = copy[0]
1085 1088 crev = manifest1.get(cfname)
1086 1089 newfparent = fparent2
1087 1090
1088 1091 if manifest2: # branch merge
1089 1092 if fparent2 == nullid or crev is None: # copied on remote side
1090 1093 if cfname in manifest2:
1091 1094 crev = manifest2[cfname]
1092 1095 newfparent = fparent1
1093 1096
1094 1097 # find source in nearest ancestor if we've lost track
1095 1098 if not crev:
1096 1099 self.ui.debug(" %s: searching for copy revision for %s\n" %
1097 1100 (fname, cfname))
1098 1101 for ancestor in self[None].ancestors():
1099 1102 if cfname in ancestor:
1100 1103 crev = ancestor[cfname].filenode()
1101 1104 break
1102 1105
1103 1106 if crev:
1104 1107 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1105 1108 meta["copy"] = cfname
1106 1109 meta["copyrev"] = hex(crev)
1107 1110 fparent1, fparent2 = nullid, newfparent
1108 1111 else:
1109 1112 self.ui.warn(_("warning: can't find ancestor for '%s' "
1110 1113 "copied from '%s'!\n") % (fname, cfname))
1111 1114
1112 1115 elif fparent2 != nullid:
1113 1116 # is one parent an ancestor of the other?
1114 1117 fparentancestor = flog.ancestor(fparent1, fparent2)
1115 1118 if fparentancestor == fparent1:
1116 1119 fparent1, fparent2 = fparent2, nullid
1117 1120 elif fparentancestor == fparent2:
1118 1121 fparent2 = nullid
1119 1122
1120 1123 # is the file changed?
1121 1124 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1122 1125 changelist.append(fname)
1123 1126 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1124 1127
1125 1128 # are just the flags changed during merge?
1126 1129 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1127 1130 changelist.append(fname)
1128 1131
1129 1132 return fparent1
1130 1133
1131 1134 @unfilteredmethod
1132 1135 def commit(self, text="", user=None, date=None, match=None, force=False,
1133 1136 editor=False, extra={}):
1134 1137 """Add a new revision to current repository.
1135 1138
1136 1139 Revision information is gathered from the working directory,
1137 1140 match can be used to filter the committed files. If editor is
1138 1141 supplied, it is called to get a commit message.
1139 1142 """
1140 1143
1141 1144 def fail(f, msg):
1142 1145 raise util.Abort('%s: %s' % (f, msg))
1143 1146
1144 1147 if not match:
1145 1148 match = matchmod.always(self.root, '')
1146 1149
1147 1150 if not force:
1148 1151 vdirs = []
1149 1152 match.explicitdir = vdirs.append
1150 1153 match.bad = fail
1151 1154
1152 1155 wlock = self.wlock()
1153 1156 try:
1154 1157 wctx = self[None]
1155 1158 merge = len(wctx.parents()) > 1
1156 1159
1157 1160 if (not force and merge and match and
1158 1161 (match.files() or match.anypats())):
1159 1162 raise util.Abort(_('cannot partially commit a merge '
1160 1163 '(do not specify files or patterns)'))
1161 1164
1162 1165 changes = self.status(match=match, clean=force)
1163 1166 if force:
1164 1167 changes[0].extend(changes[6]) # mq may commit unchanged files
1165 1168
1166 1169 # check subrepos
1167 1170 subs = []
1168 1171 commitsubs = set()
1169 1172 newstate = wctx.substate.copy()
1170 1173 # only manage subrepos and .hgsubstate if .hgsub is present
1171 1174 if '.hgsub' in wctx:
1172 1175 # we'll decide whether to track this ourselves, thanks
1173 1176 if '.hgsubstate' in changes[0]:
1174 1177 changes[0].remove('.hgsubstate')
1175 1178 if '.hgsubstate' in changes[2]:
1176 1179 changes[2].remove('.hgsubstate')
1177 1180
1178 1181 # compare current state to last committed state
1179 1182 # build new substate based on last committed state
1180 1183 oldstate = wctx.p1().substate
1181 1184 for s in sorted(newstate.keys()):
1182 1185 if not match(s):
1183 1186 # ignore working copy, use old state if present
1184 1187 if s in oldstate:
1185 1188 newstate[s] = oldstate[s]
1186 1189 continue
1187 1190 if not force:
1188 1191 raise util.Abort(
1189 1192 _("commit with new subrepo %s excluded") % s)
1190 1193 if wctx.sub(s).dirty(True):
1191 1194 if not self.ui.configbool('ui', 'commitsubrepos'):
1192 1195 raise util.Abort(
1193 1196 _("uncommitted changes in subrepo %s") % s,
1194 1197 hint=_("use --subrepos for recursive commit"))
1195 1198 subs.append(s)
1196 1199 commitsubs.add(s)
1197 1200 else:
1198 1201 bs = wctx.sub(s).basestate()
1199 1202 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1200 1203 if oldstate.get(s, (None, None, None))[1] != bs:
1201 1204 subs.append(s)
1202 1205
1203 1206 # check for removed subrepos
1204 1207 for p in wctx.parents():
1205 1208 r = [s for s in p.substate if s not in newstate]
1206 1209 subs += [s for s in r if match(s)]
1207 1210 if subs:
1208 1211 if (not match('.hgsub') and
1209 1212 '.hgsub' in (wctx.modified() + wctx.added())):
1210 1213 raise util.Abort(
1211 1214 _("can't commit subrepos without .hgsub"))
1212 1215 changes[0].insert(0, '.hgsubstate')
1213 1216
1214 1217 elif '.hgsub' in changes[2]:
1215 1218 # clean up .hgsubstate when .hgsub is removed
1216 1219 if ('.hgsubstate' in wctx and
1217 1220 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1218 1221 changes[2].insert(0, '.hgsubstate')
1219 1222
1220 1223 # make sure all explicit patterns are matched
1221 1224 if not force and match.files():
1222 1225 matched = set(changes[0] + changes[1] + changes[2])
1223 1226
1224 1227 for f in match.files():
1225 1228 f = self.dirstate.normalize(f)
1226 1229 if f == '.' or f in matched or f in wctx.substate:
1227 1230 continue
1228 1231 if f in changes[3]: # missing
1229 1232 fail(f, _('file not found!'))
1230 1233 if f in vdirs: # visited directory
1231 1234 d = f + '/'
1232 1235 for mf in matched:
1233 1236 if mf.startswith(d):
1234 1237 break
1235 1238 else:
1236 1239 fail(f, _("no match under directory!"))
1237 1240 elif f not in self.dirstate:
1238 1241 fail(f, _("file not tracked!"))
1239 1242
1240 1243 cctx = context.workingctx(self, text, user, date, extra, changes)
1241 1244
1242 1245 if (not force and not extra.get("close") and not merge
1243 1246 and not cctx.files()
1244 1247 and wctx.branch() == wctx.p1().branch()):
1245 1248 return None
1246 1249
1247 1250 if merge and cctx.deleted():
1248 1251 raise util.Abort(_("cannot commit merge with missing files"))
1249 1252
1250 1253 ms = mergemod.mergestate(self)
1251 1254 for f in changes[0]:
1252 1255 if f in ms and ms[f] == 'u':
1253 1256 raise util.Abort(_("unresolved merge conflicts "
1254 1257 "(see hg help resolve)"))
1255 1258
1256 1259 if editor:
1257 1260 cctx._text = editor(self, cctx, subs)
1258 1261 edited = (text != cctx._text)
1259 1262
1260 1263 # commit subs and write new state
1261 1264 if subs:
1262 1265 for s in sorted(commitsubs):
1263 1266 sub = wctx.sub(s)
1264 1267 self.ui.status(_('committing subrepository %s\n') %
1265 1268 subrepo.subrelpath(sub))
1266 1269 sr = sub.commit(cctx._text, user, date)
1267 1270 newstate[s] = (newstate[s][0], sr)
1268 1271 subrepo.writestate(self, newstate)
1269 1272
1270 1273 # Save commit message in case this transaction gets rolled back
1271 1274 # (e.g. by a pretxncommit hook). Leave the content alone on
1272 1275 # the assumption that the user will use the same editor again.
1273 1276 msgfn = self.savecommitmessage(cctx._text)
1274 1277
1275 1278 p1, p2 = self.dirstate.parents()
1276 1279 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1277 1280 try:
1278 1281 self.hook("precommit", throw=True, parent1=hookp1,
1279 1282 parent2=hookp2)
1280 1283 ret = self.commitctx(cctx, True)
1281 1284 except: # re-raises
1282 1285 if edited:
1283 1286 self.ui.write(
1284 1287 _('note: commit message saved in %s\n') % msgfn)
1285 1288 raise
1286 1289
1287 1290 # update bookmarks, dirstate and mergestate
1288 1291 bookmarks.update(self, [p1, p2], ret)
1289 1292 cctx.markcommitted(ret)
1290 1293 ms.reset()
1291 1294 finally:
1292 1295 wlock.release()
1293 1296
1294 1297 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1295 1298 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1296 1299 self._afterlock(commithook)
1297 1300 return ret
1298 1301
1299 1302 @unfilteredmethod
1300 1303 def commitctx(self, ctx, error=False):
1301 1304 """Add a new revision to current repository.
1302 1305 Revision information is passed via the context argument.
1303 1306 """
1304 1307
1305 1308 tr = lock = None
1306 1309 removed = list(ctx.removed())
1307 1310 p1, p2 = ctx.p1(), ctx.p2()
1308 1311 user = ctx.user()
1309 1312
1310 1313 lock = self.lock()
1311 1314 try:
1312 1315 tr = self.transaction("commit")
1313 1316 trp = weakref.proxy(tr)
1314 1317
1315 1318 if ctx.files():
1316 1319 m1 = p1.manifest().copy()
1317 1320 m2 = p2.manifest()
1318 1321
1319 1322 # check in files
1320 1323 new = {}
1321 1324 changed = []
1322 1325 linkrev = len(self)
1323 1326 for f in sorted(ctx.modified() + ctx.added()):
1324 1327 self.ui.note(f + "\n")
1325 1328 try:
1326 1329 fctx = ctx[f]
1327 1330 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1328 1331 changed)
1329 1332 m1.set(f, fctx.flags())
1330 1333 except OSError, inst:
1331 1334 self.ui.warn(_("trouble committing %s!\n") % f)
1332 1335 raise
1333 1336 except IOError, inst:
1334 1337 errcode = getattr(inst, 'errno', errno.ENOENT)
1335 1338 if error or errcode and errcode != errno.ENOENT:
1336 1339 self.ui.warn(_("trouble committing %s!\n") % f)
1337 1340 raise
1338 1341 else:
1339 1342 removed.append(f)
1340 1343
1341 1344 # update manifest
1342 1345 m1.update(new)
1343 1346 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1344 1347 drop = [f for f in removed if f in m1]
1345 1348 for f in drop:
1346 1349 del m1[f]
1347 1350 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1348 1351 p2.manifestnode(), (new, drop))
1349 1352 files = changed + removed
1350 1353 else:
1351 1354 mn = p1.manifestnode()
1352 1355 files = []
1353 1356
1354 1357 # update changelog
1355 1358 self.changelog.delayupdate()
1356 1359 n = self.changelog.add(mn, files, ctx.description(),
1357 1360 trp, p1.node(), p2.node(),
1358 1361 user, ctx.date(), ctx.extra().copy())
1359 1362 p = lambda: self.changelog.writepending() and self.root or ""
1360 1363 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1361 1364 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1362 1365 parent2=xp2, pending=p)
1363 1366 self.changelog.finalize(trp)
1364 1367 # set the new commit in its proper phase
1365 1368 targetphase = phases.newcommitphase(self.ui)
1366 1369 if targetphase:
1367 1370 # retracting the boundary does not alter parent changesets.
1368 1371 # if a parent has a higher phase, the resulting phase will
1369 1372 # be compliant anyway
1370 1373 #
1371 1374 # if minimal phase was 0 we don't need to retract anything
1372 1375 phases.retractboundary(self, targetphase, [n])
1373 1376 tr.close()
1374 1377 branchmap.updatecache(self.filtered('served'))
1375 1378 return n
1376 1379 finally:
1377 1380 if tr:
1378 1381 tr.release()
1379 1382 lock.release()
1380 1383
1381 1384 @unfilteredmethod
1382 1385 def destroying(self):
1383 1386 '''Inform the repository that nodes are about to be destroyed.
1384 1387 Intended for use by strip and rollback, so there's a common
1385 1388 place for anything that has to be done before destroying history.
1386 1389
1387 1390 This is mostly useful for saving state that is in memory and waiting
1388 1391 to be flushed when the current lock is released. Because a call to
1389 1392 destroyed is imminent, the repo will be invalidated causing those
1390 1393 changes to stay in memory (waiting for the next unlock), or vanish
1391 1394 completely.
1392 1395 '''
1393 1396 # When using the same lock to commit and strip, the phasecache is left
1394 1397 # dirty after committing. Then when we strip, the repo is invalidated,
1395 1398 # causing those changes to disappear.
1396 1399 if '_phasecache' in vars(self):
1397 1400 self._phasecache.write()
1398 1401
1399 1402 @unfilteredmethod
1400 1403 def destroyed(self):
1401 1404 '''Inform the repository that nodes have been destroyed.
1402 1405 Intended for use by strip and rollback, so there's a common
1403 1406 place for anything that has to be done after destroying history.
1404 1407 '''
1405 1408 # When one tries to:
1406 1409 # 1) destroy nodes thus calling this method (e.g. strip)
1407 1410 # 2) use phasecache somewhere (e.g. commit)
1408 1411 #
1409 1412 # then 2) will fail because the phasecache contains nodes that were
1410 1413 # removed. We can either remove phasecache from the filecache,
1411 1414 # causing it to reload next time it is accessed, or simply filter
1412 1415 # the removed nodes now and write the updated cache.
1413 1416 self._phasecache.filterunknown(self)
1414 1417 self._phasecache.write()
1415 1418
1416 1419 # update the 'served' branch cache to help read-only server processes
1417 1420 # Thanks to branchcache collaboration this is done from the nearest
1418 1421 # filtered subset and it is expected to be fast.
1419 1422 branchmap.updatecache(self.filtered('served'))
1420 1423
1421 1424 # Ensure the persistent tag cache is updated. Doing it now
1422 1425 # means that the tag cache only has to worry about destroyed
1423 1426 # heads immediately after a strip/rollback. That in turn
1424 1427 # guarantees that "cachetip == currenttip" (comparing both rev
1425 1428 # and node) always means no nodes have been added or destroyed.
1426 1429
1427 1430 # XXX this is suboptimal when qrefresh'ing: we strip the current
1428 1431 # head, refresh the tag cache, then immediately add a new head.
1429 1432 # But I think doing it this way is necessary for the "instant
1430 1433 # tag cache retrieval" case to work.
1431 1434 self.invalidate()
1432 1435
1433 1436 def walk(self, match, node=None):
1434 1437 '''
1435 1438 walk recursively through the directory tree or a given
1436 1439 changeset, finding all files matched by the match
1437 1440 function
1438 1441 '''
1439 1442 return self[node].walk(match)
1440 1443
1441 1444 def status(self, node1='.', node2=None, match=None,
1442 1445 ignored=False, clean=False, unknown=False,
1443 1446 listsubrepos=False):
1444 1447 """return status of files between two nodes or node and working
1445 1448 directory.
1446 1449
1447 1450 If node1 is None, use the first dirstate parent instead.
1448 1451 If node2 is None, compare node1 with working directory.
1449 1452 """
1450 1453
1451 1454 def mfmatches(ctx):
1452 1455 mf = ctx.manifest().copy()
1453 1456 if match.always():
1454 1457 return mf
1455 1458 for fn in mf.keys():
1456 1459 if not match(fn):
1457 1460 del mf[fn]
1458 1461 return mf
1459 1462
1460 1463 if isinstance(node1, context.changectx):
1461 1464 ctx1 = node1
1462 1465 else:
1463 1466 ctx1 = self[node1]
1464 1467 if isinstance(node2, context.changectx):
1465 1468 ctx2 = node2
1466 1469 else:
1467 1470 ctx2 = self[node2]
1468 1471
1469 1472 working = ctx2.rev() is None
1470 1473 parentworking = working and ctx1 == self['.']
1471 1474 match = match or matchmod.always(self.root, self.getcwd())
1472 1475 listignored, listclean, listunknown = ignored, clean, unknown
1473 1476
1474 1477 # load earliest manifest first for caching reasons
1475 1478 if not working and ctx2.rev() < ctx1.rev():
1476 1479 ctx2.manifest()
1477 1480
1478 1481 if not parentworking:
1479 1482 def bad(f, msg):
1480 1483 # 'f' may be a directory pattern from 'match.files()',
1481 1484 # so 'f not in ctx1' is not enough
1482 1485 if f not in ctx1 and f not in ctx1.dirs():
1483 1486 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1484 1487 match.bad = bad
1485 1488
1486 1489 if working: # we need to scan the working dir
1487 1490 subrepos = []
1488 1491 if '.hgsub' in self.dirstate:
1489 1492 subrepos = sorted(ctx2.substate)
1490 1493 s = self.dirstate.status(match, subrepos, listignored,
1491 1494 listclean, listunknown)
1492 1495 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1493 1496
1494 1497 # check for any possibly clean files
1495 1498 if parentworking and cmp:
1496 1499 fixup = []
1497 1500 # do a full compare of any files that might have changed
1498 1501 for f in sorted(cmp):
1499 1502 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1500 1503 or ctx1[f].cmp(ctx2[f])):
1501 1504 modified.append(f)
1502 1505 else:
1503 1506 fixup.append(f)
1504 1507
1505 1508 # update dirstate for files that are actually clean
1506 1509 if fixup:
1507 1510 if listclean:
1508 1511 clean += fixup
1509 1512
1510 1513 try:
1511 1514 # updating the dirstate is optional
1512 1515 # so we don't wait on the lock
1513 1516 wlock = self.wlock(False)
1514 1517 try:
1515 1518 for f in fixup:
1516 1519 self.dirstate.normal(f)
1517 1520 finally:
1518 1521 wlock.release()
1519 1522 except error.LockError:
1520 1523 pass
1521 1524
1522 1525 if not parentworking:
1523 1526 mf1 = mfmatches(ctx1)
1524 1527 if working:
1525 1528 # we are comparing working dir against non-parent
1526 1529 # generate a pseudo-manifest for the working dir
1527 1530 mf2 = mfmatches(self['.'])
1528 1531 for f in cmp + modified + added:
1529 1532 mf2[f] = None
1530 1533 mf2.set(f, ctx2.flags(f))
1531 1534 for f in removed:
1532 1535 if f in mf2:
1533 1536 del mf2[f]
1534 1537 else:
1535 1538 # we are comparing two revisions
1536 1539 deleted, unknown, ignored = [], [], []
1537 1540 mf2 = mfmatches(ctx2)
1538 1541
1539 1542 modified, added, clean = [], [], []
1540 1543 withflags = mf1.withflags() | mf2.withflags()
1541 1544 for fn, mf2node in mf2.iteritems():
1542 1545 if fn in mf1:
1543 1546 if (fn not in deleted and
1544 1547 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1545 1548 (mf1[fn] != mf2node and
1546 1549 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1547 1550 modified.append(fn)
1548 1551 elif listclean:
1549 1552 clean.append(fn)
1550 1553 del mf1[fn]
1551 1554 elif fn not in deleted:
1552 1555 added.append(fn)
1553 1556 removed = mf1.keys()
1554 1557
1555 1558 if working and modified and not self.dirstate._checklink:
1556 1559 # Symlink placeholders may get non-symlink-like contents
1557 1560 # via user error or dereferencing by NFS or Samba servers,
1558 1561 # so we filter out any placeholders that don't look like a
1559 1562 # symlink
1560 1563 sane = []
1561 1564 for f in modified:
1562 1565 if ctx2.flags(f) == 'l':
1563 1566 d = ctx2[f].data()
1564 1567 if len(d) >= 1024 or '\n' in d or util.binary(d):
1565 1568 self.ui.debug('ignoring suspect symlink placeholder'
1566 1569 ' "%s"\n' % f)
1567 1570 continue
1568 1571 sane.append(f)
1569 1572 modified = sane
1570 1573
1571 1574 r = modified, added, removed, deleted, unknown, ignored, clean
1572 1575
1573 1576 if listsubrepos:
1574 1577 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1575 1578 if working:
1576 1579 rev2 = None
1577 1580 else:
1578 1581 rev2 = ctx2.substate[subpath][1]
1579 1582 try:
1580 1583 submatch = matchmod.narrowmatcher(subpath, match)
1581 1584 s = sub.status(rev2, match=submatch, ignored=listignored,
1582 1585 clean=listclean, unknown=listunknown,
1583 1586 listsubrepos=True)
1584 1587 for rfiles, sfiles in zip(r, s):
1585 1588 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1586 1589 except error.LookupError:
1587 1590 self.ui.status(_("skipping missing subrepository: %s\n")
1588 1591 % subpath)
1589 1592
1590 1593 for l in r:
1591 1594 l.sort()
1592 1595 return r
1593 1596
1594 1597 def heads(self, start=None):
1595 1598 heads = self.changelog.heads(start)
1596 1599 # sort the output in rev descending order
1597 1600 return sorted(heads, key=self.changelog.rev, reverse=True)
1598 1601
1599 1602 def branchheads(self, branch=None, start=None, closed=False):
1600 1603 '''return a (possibly filtered) list of heads for the given branch
1601 1604
1602 1605 Heads are returned in topological order, from newest to oldest.
1603 1606 If branch is None, use the dirstate branch.
1604 1607 If start is not None, return only heads reachable from start.
1605 1608 If closed is True, return heads that are marked as closed as well.
1606 1609 '''
1607 1610 if branch is None:
1608 1611 branch = self[None].branch()
1609 1612 branches = self.branchmap()
1610 1613 if branch not in branches:
1611 1614 return []
1612 1615 # the cache returns heads ordered lowest to highest
1613 1616 bheads = list(reversed(branches[branch]))
1614 1617 if start is not None:
1615 1618 # filter out the heads that cannot be reached from startrev
1616 1619 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1617 1620 bheads = [h for h in bheads if h in fbheads]
1618 1621 if not closed:
1619 1622 bheads = [h for h in bheads if not self[h].closesbranch()]
1620 1623 return bheads
1621 1624
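A minimal usage sketch for branchheads (illustration only; assumes an already-open repo object):

    heads = repo.branchheads('default', closed=True)  # newest to oldest
    for h in heads:
        print repo[h]          # changectx, prints the short hash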
1622 1625 def branches(self, nodes):
1623 1626 if not nodes:
1624 1627 nodes = [self.changelog.tip()]
1625 1628 b = []
1626 1629 for n in nodes:
1627 1630 t = n
1628 1631 while True:
1629 1632 p = self.changelog.parents(n)
1630 1633 if p[1] != nullid or p[0] == nullid:
1631 1634 b.append((t, n, p[0], p[1]))
1632 1635 break
1633 1636 n = p[0]
1634 1637 return b
1635 1638
1636 1639 def between(self, pairs):
1637 1640 r = []
1638 1641
1639 1642 for top, bottom in pairs:
1640 1643 n, l, i = top, [], 0
1641 1644 f = 1
1642 1645
1643 1646 while n != bottom and n != nullid:
1644 1647 p = self.changelog.parents(n)[0]
1645 1648 if i == f:
1646 1649 l.append(n)
1647 1650 f = f * 2
1648 1651 n = p
1649 1652 i += 1
1650 1653
1651 1654 r.append(l)
1652 1655
1653 1656 return r
1654 1657
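between() samples each chain from top down to bottom at exponentially growing distances (1, 2, 4, 8, ...), which keeps the reply small on long histories. A runnable sketch of the same index selection over a linear chain of integers (illustration only; a real repo walks first parents, not `n - 1`):

    def sample(top, bottom):
        n, l, i, f = top, [], 0, 1
        while n != bottom:
            p = n - 1              # "first parent" in a linear chain
            if i == f:
                l.append(n)
                f *= 2
            n = p
            i += 1
        return l

    print sample(100, 0)           # [99, 98, 96, 92, 84, 68, 36]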
1655 1658 def pull(self, remote, heads=None, force=False):
1656 1659 # don't open a transaction for nothing or you break future useful
1657 1660 # rollback calls
1658 1661 tr = None
1659 1662 trname = 'pull\n' + util.hidepassword(remote.url())
1660 1663 lock = self.lock()
1661 1664 try:
1662 1665 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1663 1666 force=force)
1664 1667 common, fetch, rheads = tmp
1665 1668 if not fetch:
1666 1669 self.ui.status(_("no changes found\n"))
1667 1670 added = []
1668 1671 result = 0
1669 1672 else:
1670 1673 tr = self.transaction(trname)
1671 1674 if heads is None and list(common) == [nullid]:
1672 1675 self.ui.status(_("requesting all changes\n"))
1673 1676 elif heads is None and remote.capable('changegroupsubset'):
1674 1677 # issue1320, avoid a race if remote changed after discovery
1675 1678 heads = rheads
1676 1679
1677 1680 if remote.capable('getbundle'):
1678 1681 # TODO: get bundlecaps from remote
1679 1682 cg = remote.getbundle('pull', common=common,
1680 1683 heads=heads or rheads)
1681 1684 elif heads is None:
1682 1685 cg = remote.changegroup(fetch, 'pull')
1683 1686 elif not remote.capable('changegroupsubset'):
1684 1687 raise util.Abort(_("partial pull cannot be done because "
1685 1688 "other repository doesn't support "
1686 1689 "changegroupsubset."))
1687 1690 else:
1688 1691 cg = remote.changegroupsubset(fetch, heads, 'pull')
1689 1692 # we use the unfiltered changelog here because hidden revisions must
1690 1693 # be taken into account for phase synchronization; they may
1691 1694 # become public and visible again.
1692 1695 cl = self.unfiltered().changelog
1693 1696 clstart = len(cl)
1694 1697 result = self.addchangegroup(cg, 'pull', remote.url())
1695 1698 clend = len(cl)
1696 1699 added = [cl.node(r) for r in xrange(clstart, clend)]
1697 1700
1698 1701 # compute target subset
1699 1702 if heads is None:
1700 1703 # We pulled everything possible
1701 1704 # sync on everything common
1702 1705 subset = common + added
1703 1706 else:
1704 1707 # We pulled a specific subset
1705 1708 # sync on this subset
1706 1709 subset = heads
1707 1710
1708 1711 # Get phases data from the remote
1709 1712 remotephases = remote.listkeys('phases')
1710 1713 publishing = bool(remotephases.get('publishing', False))
1711 1714 if remotephases and not publishing:
1712 1715 # remote is new and non-publishing
1713 1716 pheads, _dr = phases.analyzeremotephases(self, subset,
1714 1717 remotephases)
1715 1718 phases.advanceboundary(self, phases.public, pheads)
1716 1719 phases.advanceboundary(self, phases.draft, subset)
1717 1720 else:
1718 1721 # Remote is old or publishing; all common changesets
1719 1722 # should be seen as public
1720 1723 phases.advanceboundary(self, phases.public, subset)
1721 1724
1722 1725 def gettransaction():
1723 1726 if tr is None:
1724 1727 return self.transaction(trname)
1725 1728 return tr
1726 1729
1727 1730 obstr = obsolete.syncpull(self, remote, gettransaction)
1728 1731 if obstr is not None:
1729 1732 tr = obstr
1730 1733
1731 1734 if tr is not None:
1732 1735 tr.close()
1733 1736 finally:
1734 1737 if tr is not None:
1735 1738 tr.release()
1736 1739 lock.release()
1737 1740
1738 1741 return result
1739 1742
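The boundary moves above rely on an invariant worth spelling out: phases are ordered, and phases.advanceboundary only ever moves changesets toward a lower (more public) phase, so a pull can publish drafts but can never demote a public changeset. A sketch of the ordering (illustration only; the numeric values match mercurial.phases):

    public, draft, secret = range(3)   # lower value = "more public"
    # advanceboundary(repo, public, nodes) guarantees that every node in
    # `nodes`, and all of its ancestors, ends up with phase <= public;
    # retractboundary (used in addchangegroup below) moves the other way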
1740 1743 def checkpush(self, force, revs):
1741 1744 """Extensions can override this function if additional checks have
1742 1745 to be performed before pushing, or call it if they override the
1743 1746 push command.
1744 1747 """
1745 1748 pass
1746 1749
1747 1750 def push(self, remote, force=False, revs=None, newbranch=False):
1748 1751 '''Push outgoing changesets (limited by revs) from the current
1749 1752 repository to remote. Return an integer:
1750 1753 - None means nothing to push
1751 1754 - 0 means HTTP error
1752 1755 - 1 means we pushed and remote head count is unchanged *or*
1753 1756 we have outgoing changesets but refused to push
1754 1757 - other values as described by addchangegroup()
1755 1758 '''
1756 1759 # there are two ways to push to remote repo:
1757 1760 #
1758 1761 # addchangegroup assumes local user can lock remote
1759 1762 # repo (local filesystem, old ssh servers).
1760 1763 #
1761 1764 # unbundle assumes local user cannot lock remote repo (new ssh
1762 1765 # servers, http servers).
1763 1766
1764 1767 if not remote.canpush():
1765 1768 raise util.Abort(_("destination does not support push"))
1766 1769 unfi = self.unfiltered()
1767 1770 def localphasemove(nodes, phase=phases.public):
1768 1771 """move <nodes> to <phase> in the local source repo"""
1769 1772 if locallock is not None:
1770 1773 phases.advanceboundary(self, phase, nodes)
1771 1774 else:
1772 1775 # repo is not locked, do not change any phases!
1773 1776 # Inform the user that phases should have been moved when
1774 1777 # applicable.
1775 1778 actualmoves = [n for n in nodes if phase < self[n].phase()]
1776 1779 phasestr = phases.phasenames[phase]
1777 1780 if actualmoves:
1778 1781 self.ui.status(_('cannot lock source repo, skipping local'
1779 1782 ' %s phase update\n') % phasestr)
1780 1783 # get local lock as we might write phase data
1781 1784 locallock = None
1782 1785 try:
1783 1786 locallock = self.lock()
1784 1787 except IOError, err:
1785 1788 if err.errno != errno.EACCES:
1786 1789 raise
1787 1790 # source repo cannot be locked.
1788 1791 # We do not abort the push, but just disable the local phase
1789 1792 # synchronisation.
1790 1793 msg = 'cannot lock source repository: %s\n' % err
1791 1794 self.ui.debug(msg)
1792 1795 try:
1793 1796 self.checkpush(force, revs)
1794 1797 lock = None
1795 1798 unbundle = remote.capable('unbundle')
1796 1799 if not unbundle:
1797 1800 lock = remote.lock()
1798 1801 try:
1799 1802 # discovery
1800 1803 fci = discovery.findcommonincoming
1801 1804 commoninc = fci(unfi, remote, force=force)
1802 1805 common, inc, remoteheads = commoninc
1803 1806 fco = discovery.findcommonoutgoing
1804 1807 outgoing = fco(unfi, remote, onlyheads=revs,
1805 1808 commoninc=commoninc, force=force)
1806 1809
1807 1810
1808 1811 if not outgoing.missing:
1809 1812 # nothing to push
1810 1813 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1811 1814 ret = None
1812 1815 else:
1813 1816 # something to push
1814 1817 if not force:
1815 1818 # if self.obsstore is false there are no obsolete markers,
1816 1819 # so we can skip the iteration entirely
1817 1820 if unfi.obsstore:
1818 1821 # these messages are defined here for 80-char-limit reasons
1819 1822 mso = _("push includes obsolete changeset: %s!")
1820 1823 mst = "push includes %s changeset: %s!"
1821 1824 # plain versions for i18n tool to detect them
1822 1825 _("push includes unstable changeset: %s!")
1823 1826 _("push includes bumped changeset: %s!")
1824 1827 _("push includes divergent changeset: %s!")
1825 1828 # If we are about to push and there is at least one
1826 1829 # obsolete or unstable changeset in missing, then at
1827 1830 # least one of the missing heads will be obsolete or
1828 1831 # unstable, so checking only the heads is enough
1829 1832 for node in outgoing.missingheads:
1830 1833 ctx = unfi[node]
1831 1834 if ctx.obsolete():
1832 1835 raise util.Abort(mso % ctx)
1833 1836 elif ctx.troubled():
1834 1837 raise util.Abort(_(mst)
1835 1838 % (ctx.troubles()[0],
1836 1839 ctx))
1837 1840 discovery.checkheads(unfi, remote, outgoing,
1838 1841 remoteheads, newbranch,
1839 1842 bool(inc))
1840 1843
1841 1844 # TODO: get bundlecaps from remote
1842 1845 bundlecaps = None
1843 1846 # create a changegroup from local
1844 1847 if revs is None and not outgoing.excluded:
1845 1848 # push everything,
1846 1849 # use the fast path, no race possible on push
1847 1850 bundler = changegroup.bundle10(self, bundlecaps)
1848 1851 cg = self._changegroupsubset(outgoing,
1849 1852 bundler,
1850 1853 'push',
1851 1854 fastpath=True)
1852 1855 else:
1853 1856 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1854 1857
1855 1858 # apply changegroup to remote
1856 1859 if unbundle:
1857 1860 # local repo finds heads on server, finds out what
1858 1861 # revs it must push. once revs transferred, if server
1859 1862 # finds it has different heads (someone else won
1860 1863 # commit/push race), server aborts.
1861 1864 if force:
1862 1865 remoteheads = ['force']
1863 1866 # ssh: return remote's addchangegroup()
1864 1867 # http: return remote's addchangegroup() or 0 for error
1865 1868 ret = remote.unbundle(cg, remoteheads, 'push')
1866 1869 else:
1867 1870 # we return an integer indicating remote head count
1868 1871 # change
1869 1872 ret = remote.addchangegroup(cg, 'push', self.url())
1870 1873
1871 1874 if ret:
1872 1875 # push succeeded, synchronize the target of the push
1873 1876 cheads = outgoing.missingheads
1874 1877 elif revs is None:
1875 1878 # an all-out push failed; synchronize all common
1876 1879 cheads = outgoing.commonheads
1877 1880 else:
1878 1881 # I want cheads = heads(::missingheads and ::commonheads)
1879 1882 # (missingheads is revs with secret changesets filtered out)
1880 1883 #
1881 1884 # This can be expressed as:
1882 1885 # cheads = ((missingheads and ::commonheads)
1883 1886 # + (commonheads and ::missingheads))
1884 1887 #
1885 1888 #
1886 1889 # while trying to push we already computed the following:
1887 1890 # common = (::commonheads)
1888 1891 # missing = ((commonheads::missingheads) - commonheads)
1889 1892 #
1890 1893 # We can pick:
1891 1894 # * missingheads part of common (::commonheads)
1892 1895 common = set(outgoing.common)
1893 1896 cheads = [node for node in revs if node in common]
1894 1897 # and
1895 1898 # * commonheads parents on missing
1896 1899 revset = unfi.set('%ln and parents(roots(%ln))',
1897 1900 outgoing.commonheads,
1898 1901 outgoing.missing)
1899 1902 cheads.extend(c.node() for c in revset)
1900 1903 # even when we don't push, exchanging phase data is useful
1901 1904 remotephases = remote.listkeys('phases')
1902 1905 if (self.ui.configbool('ui', '_usedassubrepo', False)
1903 1906 and remotephases # server supports phases
1904 1907 and ret is None # nothing was pushed
1905 1908 and remotephases.get('publishing', False)):
1906 1909 # When:
1907 1910 # - this is a subrepo push
1908 1911 # - and the remote supports phases
1909 1912 # - and no changeset was pushed
1910 1913 # - and the remote is publishing
1911 1914 # we may be in the issue 3871 case!
1912 1915 # We drop the phase synchronisation that would otherwise
1913 1916 # be done as a courtesy, to publish changesets that are
1914 1917 # possibly draft locally on the remote.
1915 1918 remotephases = {'publishing': 'True'}
1916 1919 if not remotephases: # old server or public only repo
1917 1920 localphasemove(cheads)
1918 1921 # don't push any phase data as there is nothing to push
1919 1922 else:
1920 1923 ana = phases.analyzeremotephases(self, cheads, remotephases)
1921 1924 pheads, droots = ana
1922 1925 ### Apply remote phase on local
1923 1926 if remotephases.get('publishing', False):
1924 1927 localphasemove(cheads)
1925 1928 else: # publish = False
1926 1929 localphasemove(pheads)
1927 1930 localphasemove(cheads, phases.draft)
1928 1931 ### Apply local phase on remote
1929 1932
1930 1933 # Get the list of all revs draft on the remote but public here.
1931 1934 # XXX Beware that the revset breaks if droots is not strictly a
1932 1935 # XXX set of roots; we may want to ensure it is, but that is costly
1933 1936 outdated = unfi.set('heads((%ln::%ln) and public())',
1934 1937 droots, cheads)
1935 1938 for newremotehead in outdated:
1936 1939 r = remote.pushkey('phases',
1937 1940 newremotehead.hex(),
1938 1941 str(phases.draft),
1939 1942 str(phases.public))
1940 1943 if not r:
1941 1944 self.ui.warn(_('updating %s to public failed!\n')
1942 1945 % newremotehead)
1943 1946 self.ui.debug('try to push obsolete markers to remote\n')
1944 1947 obsolete.syncpush(self, remote)
1945 1948 finally:
1946 1949 if lock is not None:
1947 1950 lock.release()
1948 1951 finally:
1949 1952 if locallock is not None:
1950 1953 locallock.release()
1951 1954
1952 1955 self.ui.debug("checking for updated bookmarks\n")
1953 1956 rb = remote.listkeys('bookmarks')
1954 1957 revnums = map(unfi.changelog.rev, revs or [])
1955 1958 ancestors = [
1956 1959 a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
1957 1960 for k in rb.keys():
1958 1961 if k in unfi._bookmarks:
1959 1962 nr, nl = rb[k], hex(self._bookmarks[k])
1960 1963 if nr in unfi:
1961 1964 cr = unfi[nr]
1962 1965 cl = unfi[nl]
1963 1966 if bookmarks.validdest(unfi, cr, cl):
1964 1967 if ancestors and cl.rev() not in ancestors:
1965 1968 continue
1966 1969 r = remote.pushkey('bookmarks', k, nr, nl)
1967 1970 if r:
1968 1971 self.ui.status(_("updating bookmark %s\n") % k)
1969 1972 else:
1970 1973 self.ui.warn(_('updating bookmark %s'
1971 1974 ' failed!\n') % k)
1972 1975
1973 1976 return ret
1974 1977
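A usage sketch showing how a caller might interpret push()'s return value, per the docstring above (illustration only; assumes open repo and remote peers):

    ret = repo.push(remote)
    if ret is None:
        print 'nothing to push'
    elif ret == 0:
        print 'push failed (HTTP error)'
    elif ret == 1:
        print 'pushed; remote head count unchanged (or push refused)'
    else:
        print 'pushed; head delta encoded as in addchangegroup():', ret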
1975 1978 def changegroupinfo(self, nodes, source):
1976 1979 if self.ui.verbose or source == 'bundle':
1977 1980 self.ui.status(_("%d changesets found\n") % len(nodes))
1978 1981 if self.ui.debugflag:
1979 1982 self.ui.debug("list of changesets:\n")
1980 1983 for node in nodes:
1981 1984 self.ui.debug("%s\n" % hex(node))
1982 1985
1983 1986 def changegroupsubset(self, bases, heads, source):
1984 1987 """Compute a changegroup consisting of all the nodes that are
1985 1988 descendants of any of the bases and ancestors of any of the heads.
1986 1989 Return a chunkbuffer object whose read() method will return
1987 1990 successive changegroup chunks.
1988 1991
1989 1992 It is fairly complex as determining which filenodes and which
1990 1993 manifest nodes need to be included for the changeset to be complete
1991 1994 is non-trivial.
1992 1995
1993 1996 Another wrinkle is doing the reverse, figuring out which changeset in
1994 1997 the changegroup a particular filenode or manifestnode belongs to.
1995 1998 """
1996 1999 cl = self.changelog
1997 2000 if not bases:
1998 2001 bases = [nullid]
1999 2002 # TODO: remove call to nodesbetween.
2000 2003 csets, bases, heads = cl.nodesbetween(bases, heads)
2001 2004 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
2002 2005 outgoing = discovery.outgoing(cl, bases, heads)
2003 2006 bundler = changegroup.bundle10(self)
2004 2007 return self._changegroupsubset(outgoing, bundler, source)
2005 2008
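A usage sketch for changegroupsubset (illustration only; the '1.0' tag is hypothetical):

    base = repo.lookup('1.0')        # everything descending from tag 1.0
    cg = repo.changegroupsubset([base], repo.heads(), 'bundle')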
2006 2009 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2007 2010 """Like getbundle, but taking a discovery.outgoing as an argument.
2008 2011
2009 2012 This is only implemented for local repos and reuses potentially
2010 2013 precomputed sets in outgoing."""
2011 2014 if not outgoing.missing:
2012 2015 return None
2013 2016 bundler = changegroup.bundle10(self, bundlecaps)
2014 2017 return self._changegroupsubset(outgoing, bundler, source)
2015 2018
2016 2019 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2017 2020 """Like changegroupsubset, but returns the set difference between the
2018 2021 ancestors of heads and the ancestors common.
2019 2022
2020 2023 If heads is None, use the local heads. If common is None, use [nullid].
2021 2024
2022 2025 The nodes in common might not all be known locally due to the way the
2023 2026 current discovery protocol works.
2024 2027 """
2025 2028 cl = self.changelog
2026 2029 if common:
2027 2030 hasnode = cl.hasnode
2028 2031 common = [n for n in common if hasnode(n)]
2029 2032 else:
2030 2033 common = [nullid]
2031 2034 if not heads:
2032 2035 heads = cl.heads()
2033 2036 return self.getlocalbundle(source,
2034 2037 discovery.outgoing(cl, common, heads),
2035 2038 bundlecaps=bundlecaps)
2036 2039
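Conceptually, getbundle computes ancestors(heads) - ancestors(common). A set-based sketch with integer revs standing in for nodes (illustration only; `ancestorsof` is a hypothetical inclusive-ancestors callable):

    def missing(ancestorsof, heads, common):
        have = set()
        for c in common:
            have |= ancestorsof(c)
        want = set()
        for h in heads:
            want |= ancestorsof(h)
        return want - have

    # on a linear history with ancestorsof = lambda r: set(range(r + 1)),
    # missing(ancestorsof, [10], [7]) == set([8, 9, 10])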
2037 2040 @unfilteredmethod
2038 2041 def _changegroupsubset(self, outgoing, bundler, source,
2039 2042 fastpath=False):
2040 2043 commonrevs = outgoing.common
2041 2044 csets = outgoing.missing
2042 2045 heads = outgoing.missingheads
2043 2046 # We go through the fast path if we get told to, or if all (unfiltered)
2044 2047 # heads have been requested (since we then know that all linkrevs will
2045 2048 # be pulled by the client).
2046 2049 heads.sort()
2047 2050 fastpathlinkrev = fastpath or (
2048 2051 self.filtername is None and heads == sorted(self.heads()))
2049 2052
2050 2053 self.hook('preoutgoing', throw=True, source=source)
2051 2054 self.changegroupinfo(csets, source)
2052 2055 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2053 2056 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2054 2057
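The fast-path decision above can be read as a small predicate: take it when told to, or when the repo is unfiltered and the client asked for every head, since then every linkrev points at a changeset the client will receive. A sketch (illustration only; the function name is hypothetical):

    def usefastpath(fastpath, filtername, requested, allheads):
        return fastpath or (filtername is None and
                            sorted(requested) == sorted(allheads))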
2055 2058 def changegroup(self, basenodes, source):
2056 2059 # to avoid a race we use changegroupsubset() (issue1320)
2057 2060 return self.changegroupsubset(basenodes, self.heads(), source)
2058 2061
2059 2062 @unfilteredmethod
2060 2063 def addchangegroup(self, source, srctype, url, emptyok=False):
2061 2064 """Add the changegroup returned by source.read() to this repo.
2062 2065 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2063 2066 the URL of the repo where this changegroup is coming from.
2064 2067
2065 2068 Return an integer summarizing the change to this repo:
2066 2069 - nothing changed or no source: 0
2067 2070 - more heads than before: 1+added heads (2..n)
2068 2071 - fewer heads than before: -1-removed heads (-2..-n)
2069 2072 - number of heads stays the same: 1
2070 2073 """
2071 2074 def csmap(x):
2072 2075 self.ui.debug("add changeset %s\n" % short(x))
2073 2076 return len(cl)
2074 2077
2075 2078 def revmap(x):
2076 2079 return cl.rev(x)
2077 2080
2078 2081 if not source:
2079 2082 return 0
2080 2083
2081 2084 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2082 2085
2083 2086 changesets = files = revisions = 0
2084 2087 efiles = set()
2085 2088
2086 2089 # write changelog data to temp files so concurrent readers will not see
2087 2090 # an inconsistent view
2088 2091 cl = self.changelog
2089 2092 cl.delayupdate()
2090 2093 oldheads = cl.heads()
2091 2094
2092 2095 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2093 2096 try:
2094 2097 trp = weakref.proxy(tr)
2095 2098 # pull off the changeset group
2096 2099 self.ui.status(_("adding changesets\n"))
2097 2100 clstart = len(cl)
2098 2101 class prog(object):
2099 2102 step = _('changesets')
2100 2103 count = 1
2101 2104 ui = self.ui
2102 2105 total = None
2103 2106 def __call__(self):
2104 2107 self.ui.progress(self.step, self.count, unit=_('chunks'),
2105 2108 total=self.total)
2106 2109 self.count += 1
2107 2110 pr = prog()
2108 2111 source.callback = pr
2109 2112
2110 2113 source.changelogheader()
2111 2114 srccontent = cl.addgroup(source, csmap, trp)
2112 2115 if not (srccontent or emptyok):
2113 2116 raise util.Abort(_("received changelog group is empty"))
2114 2117 clend = len(cl)
2115 2118 changesets = clend - clstart
2116 2119 for c in xrange(clstart, clend):
2117 2120 efiles.update(self[c].files())
2118 2121 efiles = len(efiles)
2119 2122 self.ui.progress(_('changesets'), None)
2120 2123
2121 2124 # pull off the manifest group
2122 2125 self.ui.status(_("adding manifests\n"))
2123 2126 pr.step = _('manifests')
2124 2127 pr.count = 1
2125 2128 pr.total = changesets # manifests <= changesets
2126 2129 # no need to check for empty manifest group here:
2127 2130 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2128 2131 # no new manifest will be created and the manifest group will
2129 2132 # be empty during the pull
2130 2133 source.manifestheader()
2131 2134 self.manifest.addgroup(source, revmap, trp)
2132 2135 self.ui.progress(_('manifests'), None)
2133 2136
2134 2137 needfiles = {}
2135 2138 if self.ui.configbool('server', 'validate', default=False):
2136 2139 # validate incoming csets have their manifests
2137 2140 for cset in xrange(clstart, clend):
2138 2141 mfest = self.changelog.read(self.changelog.node(cset))[0]
2139 2142 mfest = self.manifest.readdelta(mfest)
2140 2143 # store file nodes we must see
2141 2144 for f, n in mfest.iteritems():
2142 2145 needfiles.setdefault(f, set()).add(n)
2143 2146
2144 2147 # process the files
2145 2148 self.ui.status(_("adding file changes\n"))
2146 2149 pr.step = _('files')
2147 2150 pr.count = 1
2148 2151 pr.total = efiles
2149 2152 source.callback = None
2150 2153
2151 2154 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2152 2155 pr, needfiles)
2153 2156 revisions += newrevs
2154 2157 files += newfiles
2155 2158
2156 2159 dh = 0
2157 2160 if oldheads:
2158 2161 heads = cl.heads()
2159 2162 dh = len(heads) - len(oldheads)
2160 2163 for h in heads:
2161 2164 if h not in oldheads and self[h].closesbranch():
2162 2165 dh -= 1
2163 2166 htext = ""
2164 2167 if dh:
2165 2168 htext = _(" (%+d heads)") % dh
2166 2169
2167 2170 self.ui.status(_("added %d changesets"
2168 2171 " with %d changes to %d files%s\n")
2169 2172 % (changesets, revisions, files, htext))
2170 2173 self.invalidatevolatilesets()
2171 2174
2172 2175 if changesets > 0:
2173 2176 p = lambda: cl.writepending() and self.root or ""
2174 2177 self.hook('pretxnchangegroup', throw=True,
2175 2178 node=hex(cl.node(clstart)), source=srctype,
2176 2179 url=url, pending=p)
2177 2180
2178 2181 added = [cl.node(r) for r in xrange(clstart, clend)]
2179 2182 publishing = self.ui.configbool('phases', 'publish', True)
2180 2183 if srctype == 'push':
2181 2184 # Old servers cannot push the boundary themselves.
2182 2185 # New servers won't push the boundary if the changeset already
2183 2186 # existed locally as secret
2184 2187 #
2185 2188 # We should not use 'added' here but the list of all changes in
2186 2189 # the bundle
2187 2190 if publishing:
2188 2191 phases.advanceboundary(self, phases.public, srccontent)
2189 2192 else:
2190 2193 phases.advanceboundary(self, phases.draft, srccontent)
2191 2194 phases.retractboundary(self, phases.draft, added)
2192 2195 elif srctype != 'strip':
2193 2196 # publishing only alters behavior during push
2194 2197 #
2195 2198 # strip should not touch boundary at all
2196 2199 phases.retractboundary(self, phases.draft, added)
2197 2200
2198 2201 # make changelog see real files again
2199 2202 cl.finalize(trp)
2200 2203
2201 2204 tr.close()
2202 2205
2203 2206 if changesets > 0:
2204 2207 if srctype != 'strip':
2205 2208 # During strip, the branchcache is invalid but the upcoming
2206 2209 # call to `destroyed` will repair it.
2207 2210 # In other cases we can safely update the cache on disk.
2208 2211 branchmap.updatecache(self.filtered('served'))
2209 2212 def runhooks():
2210 2213 # forcefully update the on-disk branch cache
2211 2214 self.ui.debug("updating the branch cache\n")
2212 2215 self.hook("changegroup", node=hex(cl.node(clstart)),
2213 2216 source=srctype, url=url)
2214 2217
2215 2218 for n in added:
2216 2219 self.hook("incoming", node=hex(n), source=srctype,
2217 2220 url=url)
2218 2221
2219 2222 newheads = [h for h in self.heads() if h not in oldheads]
2220 2223 self.ui.log("incoming",
2221 2224 "%s incoming changes - new heads: %s\n",
2222 2225 len(added),
2223 2226 ', '.join([hex(c[:6]) for c in newheads]))
2224 2227 self._afterlock(runhooks)
2225 2228
2226 2229 finally:
2227 2230 tr.release()
2228 2231 # never return 0 here:
2229 2232 if dh < 0:
2230 2233 return dh - 1
2231 2234 else:
2232 2235 return dh + 1
2233 2236
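Because addchangegroup() never returns 0 for a non-empty source, the head delta can be decoded back out of the return value. A runnable sketch (illustration only; the helper name is hypothetical):

    def headdelta(ret):
        if ret == 0:
            return None            # nothing changed or no source
        return ret - 1 if ret > 0 else ret + 1

    print headdelta(1)             # 0  -> head count unchanged
    print headdelta(3)             # 2  -> two heads added
    print headdelta(-2)            # -1 -> one head removed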
2234 2237 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2235 2238 revisions = 0
2236 2239 files = 0
2237 2240 while True:
2238 2241 chunkdata = source.filelogheader()
2239 2242 if not chunkdata:
2240 2243 break
2241 2244 f = chunkdata["filename"]
2242 2245 self.ui.debug("adding %s revisions\n" % f)
2243 2246 pr()
2244 2247 fl = self.file(f)
2245 2248 o = len(fl)
2246 2249 if not fl.addgroup(source, revmap, trp):
2247 2250 raise util.Abort(_("received file revlog group is empty"))
2248 2251 revisions += len(fl) - o
2249 2252 files += 1
2250 2253 if f in needfiles:
2251 2254 needs = needfiles[f]
2252 2255 for new in xrange(o, len(fl)):
2253 2256 n = fl.node(new)
2254 2257 if n in needs:
2255 2258 needs.remove(n)
2256 2259 else:
2257 2260 raise util.Abort(
2258 2261 _("received spurious file revlog entry"))
2259 2262 if not needs:
2260 2263 del needfiles[f]
2261 2264 self.ui.progress(_('files'), None)
2262 2265
2263 2266 for f, needs in needfiles.iteritems():
2264 2267 fl = self.file(f)
2265 2268 for n in needs:
2266 2269 try:
2267 2270 fl.rev(n)
2268 2271 except error.LookupError:
2269 2272 raise util.Abort(
2270 2273 _('missing file data for %s:%s - run hg verify') %
2271 2274 (f, hex(n)))
2272 2275
2273 2276 return revisions, files
2274 2277
2275 2278 def stream_in(self, remote, requirements):
2276 2279 lock = self.lock()
2277 2280 try:
2278 2281 # Save remote branchmap. We will use it later
2279 2282 # to speed up branchcache creation
2280 2283 rbranchmap = None
2281 2284 if remote.capable("branchmap"):
2282 2285 rbranchmap = remote.branchmap()
2283 2286
2284 2287 fp = remote.stream_out()
2285 2288 l = fp.readline()
2286 2289 try:
2287 2290 resp = int(l)
2288 2291 except ValueError:
2289 2292 raise error.ResponseError(
2290 2293 _('unexpected response from remote server:'), l)
2291 2294 if resp == 1:
2292 2295 raise util.Abort(_('operation forbidden by server'))
2293 2296 elif resp == 2:
2294 2297 raise util.Abort(_('locking the remote repository failed'))
2295 2298 elif resp != 0:
2296 2299 raise util.Abort(_('the server sent an unknown error code'))
2297 2300 self.ui.status(_('streaming all changes\n'))
2298 2301 l = fp.readline()
2299 2302 try:
2300 2303 total_files, total_bytes = map(int, l.split(' ', 1))
2301 2304 except (ValueError, TypeError):
2302 2305 raise error.ResponseError(
2303 2306 _('unexpected response from remote server:'), l)
2304 2307 self.ui.status(_('%d files to transfer, %s of data\n') %
2305 2308 (total_files, util.bytecount(total_bytes)))
2306 2309 handled_bytes = 0
2307 2310 self.ui.progress(_('clone'), 0, total=total_bytes)
2308 2311 start = time.time()
2309 2312 for i in xrange(total_files):
2310 2313 # XXX doesn't support '\n' or '\r' in filenames
2311 2314 l = fp.readline()
2312 2315 try:
2313 2316 name, size = l.split('\0', 1)
2314 2317 size = int(size)
2315 2318 except (ValueError, TypeError):
2316 2319 raise error.ResponseError(
2317 2320 _('unexpected response from remote server:'), l)
2318 2321 if self.ui.debugflag:
2319 2322 self.ui.debug('adding %s (%s)\n' %
2320 2323 (name, util.bytecount(size)))
2321 2324 # for backwards compat, name was partially encoded
2322 2325 ofp = self.sopener(store.decodedir(name), 'w')
2323 2326 for chunk in util.filechunkiter(fp, limit=size):
2324 2327 handled_bytes += len(chunk)
2325 2328 self.ui.progress(_('clone'), handled_bytes,
2326 2329 total=total_bytes)
2327 2330 ofp.write(chunk)
2328 2331 ofp.close()
2329 2332 elapsed = time.time() - start
2330 2333 if elapsed <= 0:
2331 2334 elapsed = 0.001
2332 2335 self.ui.progress(_('clone'), None)
2333 2336 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2334 2337 (util.bytecount(total_bytes), elapsed,
2335 2338 util.bytecount(total_bytes / elapsed)))
2336 2339
2337 2340 # new requirements = old non-format requirements
2338 2341 # + new format-related requirements
2339 2342 # from the streamed-in repository
2340 2343 requirements.update(set(self.requirements) - self.supportedformats)
2341 2344 self._applyrequirements(requirements)
2342 2345 self._writerequirements()
2343 2346
2344 2347 if rbranchmap:
2345 2348 rbheads = []
2346 2349 for bheads in rbranchmap.itervalues():
2347 2350 rbheads.extend(bheads)
2348 2351
2349 2352 if rbheads:
2350 2353 rtiprev = max((int(self.changelog.rev(node))
2351 2354 for node in rbheads))
2352 2355 cache = branchmap.branchcache(rbranchmap,
2353 2356 self[rtiprev].node(),
2354 2357 rtiprev)
2355 2358 # Try to stick it as low as possible
2356 2359 # filters above 'served' are unlikely to be fetched from a clone
2357 2360 for candidate in ('base', 'immutable', 'served'):
2358 2361 rview = self.filtered(candidate)
2359 2362 if cache.validfor(rview):
2360 2363 self._branchcaches[candidate] = cache
2361 2364 cache.write(rview)
2362 2365 break
2363 2366 self.invalidate()
2364 2367 return len(self.heads()) + 1
2365 2368 finally:
2366 2369 lock.release()
2367 2370
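The wire format parsed by stream_in above is line-oriented. A simplified parsing sketch of just the headers (assumption: no error handling; `fp` is the file object returned by the remote's stream_out):

    def readstreamheader(fp):
        resp = int(fp.readline())  # 0 ok, 1 forbidden, 2 remote lock failed
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return resp, total_files, total_bytes

    # then, per file: a '<name>\0<size>' line followed by exactly
    # <size> bytes of raw file data before the next header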
2368 2371 def clone(self, remote, heads=[], stream=False):
2369 2372 '''clone remote repository.
2370 2373
2371 2374 keyword arguments:
2372 2375 heads: list of revs to clone (forces use of pull)
2373 2376 stream: use streaming clone if possible'''
2374 2377
2375 2378 # now, all clients that can request uncompressed clones can
2376 2379 # read repo formats supported by all servers that can serve
2377 2380 # them.
2378 2381
2379 2382 # if revlog format changes, client will have to check version
2380 2383 # and format flags on "stream" capability, and use
2381 2384 # uncompressed only if compatible.
2382 2385
2383 2386 if not stream:
2384 2387 # if the server explicitly prefers to stream (for fast LANs)
2385 2388 stream = remote.capable('stream-preferred')
2386 2389
2387 2390 if stream and not heads:
2388 2391 # 'stream' means remote revlog format is revlogv1 only
2389 2392 if remote.capable('stream'):
2390 2393 return self.stream_in(remote, set(('revlogv1',)))
2391 2394 # otherwise, 'streamreqs' contains the remote revlog format
2392 2395 streamreqs = remote.capable('streamreqs')
2393 2396 if streamreqs:
2394 2397 streamreqs = set(streamreqs.split(','))
2395 2398 # if we support it, stream in and adjust our requirements
2396 2399 if not streamreqs - self.supportedformats:
2397 2400 return self.stream_in(remote, streamreqs)
2398 2401 return self.pull(remote, heads)
2399 2402
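The streaming-clone negotiation above boils down to a requirements subset check: stream only if every requirement the server would stream is one this client supports. A sketch (illustration only; 'generaldelta' stands in as a hypothetical extra requirement):

    streamreqs = set(['revlogv1', 'generaldelta'])   # from the server
    supported = set(['revlogv1'])                    # this client
    canstream = not (streamreqs - supported)         # False here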
2400 2403 def pushkey(self, namespace, key, old, new):
2401 2404 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2402 2405 old=old, new=new)
2403 2406 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2404 2407 ret = pushkey.push(self, namespace, key, old, new)
2405 2408 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2406 2409 ret=ret)
2407 2410 return ret
2408 2411
2409 2412 def listkeys(self, namespace):
2410 2413 self.hook('prelistkeys', throw=True, namespace=namespace)
2411 2414 self.ui.debug('listing keys for "%s"\n' % namespace)
2412 2415 values = pushkey.list(self, namespace)
2413 2416 self.hook('listkeys', namespace=namespace, values=values)
2414 2417 return values
2415 2418
2416 2419 def debugwireargs(self, one, two, three=None, four=None, five=None):
2417 2420 '''used to test argument passing over the wire'''
2418 2421 return "%s %s %s %s %s" % (one, two, three, four, five)
2419 2422
2420 2423 def savecommitmessage(self, text):
2421 2424 fp = self.opener('last-message.txt', 'wb')
2422 2425 try:
2423 2426 fp.write(text)
2424 2427 finally:
2425 2428 fp.close()
2426 2429 return self.pathto(fp.name[len(self.root) + 1:])
2427 2430
2428 2431 # used to avoid circular references so destructors work
2429 2432 def aftertrans(files):
2430 2433 renamefiles = [tuple(t) for t in files]
2431 2434 def a():
2432 2435 for vfs, src, dest in renamefiles:
2433 2436 try:
2434 2437 vfs.rename(src, dest)
2435 2438 except OSError: # journal file does not yet exist
2436 2439 pass
2437 2440 return a
2438 2441
2439 2442 def undoname(fn):
2440 2443 base, name = os.path.split(fn)
2441 2444 assert name.startswith('journal')
2442 2445 return os.path.join(base, name.replace('journal', 'undo', 1))
2443 2446
2444 2447 def instance(ui, path, create):
2445 2448 return localrepository(ui, util.urllocalpath(path), create)
2446 2449
2447 2450 def islocal(path):
2448 2451 return True
@@ -1,94 +1,179 b''
1 1 """test behavior of propertycache and unfiltered propertycache
2 2
3 3 The repoview overlay is quite complex. We test the behavior of the
4 4 property caches of both localrepo and repoview to prevent
5 5 regressions."""
6 6
7 7 import os, subprocess
8 8 import mercurial.localrepo
9 9 import mercurial.repoview
10 10 import mercurial.util
11 11 import mercurial.hg
12 12 import mercurial.ui as uimod
13 13
14 14
15 15 # create some special property caches that trace their calls
16 16
17 17 calllog = []
18 18 @mercurial.util.propertycache
19 19 def testcachedfoobar(repo):
20 20 name = repo.filtername
21 21 if name is None:
22 22 name = ''
23 23 val = len(name)
24 24 calllog.append(val)
25 25 return val
26 26
27 unficalllog = []
28 @mercurial.localrepo.unfilteredpropertycache
29 def testcachedunfifoobar(repo):
30 name = repo.filtername
31 if name is None:
32 name = ''
33 val = 100 + len(name)
34 unficalllog.append(val)
35 return val
36
27 37 # plug them onto the repo
28 38 mercurial.localrepo.localrepository.testcachedfoobar = testcachedfoobar
39 mercurial.localrepo.localrepository.testcachedunfifoobar = testcachedunfifoobar
29 40
30 41
31 42 # create an empty repo and instantiate it. It is important to run
32 43 # those tests on the real object to detect regressions.
33 44 repopath = os.path.join(os.environ['TESTTMP'], 'repo')
34 45 subprocess.check_call(['hg', 'init', repopath])
35 46 ui = uimod.ui()
36 47 repo = mercurial.hg.repository(ui, path=repopath).unfiltered()
37 48
38 49
39 50 print ''
40 51 print '=== property cache ==='
41 52 print ''
42 53 print 'calllog:', calllog
43 54 print 'cached value (unfiltered):',
44 55 print vars(repo).get('testcachedfoobar', 'NOCACHE')
45 56
46 57 print ''
47 58 print '= first access on unfiltered, should do a call'
48 59 print 'access:', repo.testcachedfoobar
49 60 print 'calllog:', calllog
50 61 print 'cached value (unfiltered):',
51 62 print vars(repo).get('testcachedfoobar', 'NOCACHE')
52 63
53 64 print ''
54 65 print '= second access on unfiltered, should not do call'
55 66 print 'access', repo.testcachedfoobar
56 67 print 'calllog:', calllog
57 68 print 'cached value (unfiltered):',
58 69 print vars(repo).get('testcachedfoobar', 'NOCACHE')
59 70
60 71 print ''
61 72 print '= first access on "visible" view, should do a call'
62 73 visibleview = repo.filtered('visible')
63 74 print 'cached value ("visible" view):',
64 75 print vars(visibleview).get('testcachedfoobar', 'NOCACHE')
65 76 print 'access:', visibleview.testcachedfoobar
66 77 print 'calllog:', calllog
67 78 print 'cached value (unfiltered):',
68 79 print vars(repo).get('testcachedfoobar', 'NOCACHE')
69 80 print 'cached value ("visible" view):',
70 81 print vars(visibleview).get('testcachedfoobar', 'NOCACHE')
71 82
72 83 print ''
73 84 print '= second access on "visible view", should not do call'
74 85 print 'access:', visibleview.testcachedfoobar
75 86 print 'calllog:', calllog
76 87 print 'cached value (unfiltered):',
77 88 print vars(repo).get('testcachedfoobar', 'NOCACHE')
78 89 print 'cached value ("visible" view):',
79 90 print vars(visibleview).get('testcachedfoobar', 'NOCACHE')
80 91
81 92 print ''
82 93 print '= no effect on other view'
83 94 immutableview = repo.filtered('immutable')
84 95 print 'cached value ("immutable" view):',
85 96 print vars(immutableview).get('testcachedfoobar', 'NOCACHE')
86 97 print 'access:', immutableview.testcachedfoobar
87 98 print 'calllog:', calllog
88 99 print 'cached value (unfiltered):',
89 100 print vars(repo).get('testcachedfoobar', 'NOCACHE')
90 101 print 'cached value ("visible" view):',
91 102 print vars(visibleview).get('testcachedfoobar', 'NOCACHE')
92 103 print 'cached value ("immutable" view):',
93 104 print vars(immutableview).get('testcachedfoobar', 'NOCACHE')
94 105
106 # unfiltered property cache test
107 print ''
108 print ''
109 print '=== unfiltered property cache ==='
110 print ''
111 print 'unficalllog:', unficalllog
112 print 'cached value (unfiltered): ',
113 print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
114 print 'cached value ("visible" view): ',
115 print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
116 print 'cached value ("immutable" view):',
117 print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
118
119 print ''
120 print '= first access on unfiltered, should do a call'
121 print 'access (unfiltered):', repo.testcachedunfifoobar
122 print 'unficalllog:', unficalllog
123 print 'cached value (unfiltered): ',
124 print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
125
126 print ''
127 print '= second access on unfiltered, should not do call'
128 print 'access (unfiltered):', repo.testcachedunfifoobar
129 print 'unficalllog:', unficalllog
130 print 'cached value (unfiltered): ',
131 print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
132
133 print ''
134 print '= access on view should use the unfiltered cache'
135 print 'access (unfiltered): ', repo.testcachedunfifoobar
136 print 'access ("visible" view): ', visibleview.testcachedunfifoobar
137 print 'access ("immutable" view):', immutableview.testcachedunfifoobar
138 print 'unficalllog:', unficalllog
139 print 'cached value (unfiltered): ',
140 print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
141 print 'cached value ("visible" view): ',
142 print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
143 print 'cached value ("immutable" view):',
144 print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
145
146 print ''
147 print '= even if we clear the unfiltered cache'
148 del repo.__dict__['testcachedunfifoobar']
149 print 'cached value (unfiltered): ',
150 print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
151 print 'cached value ("visible" view): ',
152 print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
153 print 'cached value ("immutable" view):',
154 print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
155 print 'unficalllog:', unficalllog
156 print 'access ("visible" view): ', visibleview.testcachedunfifoobar
157 print 'unficalllog:', unficalllog
158 print 'cached value (unfiltered): ',
159 print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
160 print 'cached value ("visible" view): ',
161 print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
162 print 'cached value ("immutable" view):',
163 print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
164 print 'access ("immutable" view):', immutableview.testcachedunfifoobar
165 print 'unficalllog:', unficalllog
166 print 'cached value (unfiltered): ',
167 print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
168 print 'cached value ("visible" view): ',
169 print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
170 print 'cached value ("immutable" view):',
171 print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
172 print 'access (unfiltered): ', repo.testcachedunfifoobar
173 print 'unficalllog:', unficalllog
174 print 'cached value (unfiltered): ',
175 print vars(repo).get('testcachedunfifoobar', 'NOCACHE')
176 print 'cached value ("visible" view): ',
177 print vars(visibleview).get('testcachedunfifoobar', 'NOCACHE')
178 print 'cached value ("immutable" view):',
179 print vars(immutableview).get('testcachedunfifoobar', 'NOCACHE')
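These tests rely on the property caches being non-data descriptors that write their computed value into the instance __dict__, so later reads bypass the descriptor entirely; that is what the vars(repo).get(...) probes observe. A minimal runnable sketch of the pattern (illustration only, not mercurial.util.propertycache itself):

    class cachedprop(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            if obj is None:
                return self
            val = self.func(obj)
            obj.__dict__[self.name] = val  # later reads hit the dict, not us
            return val

    class Repo(object):
        @cachedprop
        def answer(self):
            print 'computing'
            return 42

    r = Repo()
    print r.answer                 # computing, then 42
    print r.answer                 # 42 (cached in vars(r))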
@@ -1,36 +1,84 b''
1 1
2 2 === property cache ===
3 3
4 4 calllog: []
5 5 cached value (unfiltered): NOCACHE
6 6
7 7 = first access on unfiltered, should do a call
8 8 access: 0
9 9 calllog: [0]
10 10 cached value (unfiltered): 0
11 11
12 12 = second access on unfiltered, should not do call
13 13 access 0
14 14 calllog: [0]
15 15 cached value (unfiltered): 0
16 16
17 17 = first access on "visible" view, should do a call
18 18 cached value ("visible" view): NOCACHE
19 19 access: 7
20 20 calllog: [0, 7]
21 21 cached value (unfiltered): 0
22 22 cached value ("visible" view): 7
23 23
24 24 = second access on "visible view", should not do call
25 25 access: 7
26 26 calllog: [0, 7]
27 27 cached value (unfiltered): 0
28 28 cached value ("visible" view): 7
29 29
30 30 = no effect on other view
31 31 cached value ("immutable" view): NOCACHE
32 32 access: 9
33 33 calllog: [0, 7, 9]
34 34 cached value (unfiltered): 0
35 35 cached value ("visible" view): 7
36 36 cached value ("immutable" view): 9
37
38
39 === unfiltered property cache ===
40
41 unficalllog: []
42 cached value (unfiltered): NOCACHE
43 cached value ("visible" view): NOCACHE
44 cached value ("immutable" view): NOCACHE
45
46 = first access on unfiltered, should do a call
47 access (unfiltered): 100
48 unficalllog: [100]
49 cached value (unfiltered): 100
50
51 = second access on unfiltered, should not do call
52 access (unfiltered): 100
53 unficalllog: [100]
54 cached value (unfiltered): 100
55
56 = access on view should use the unfiltered cache
57 access (unfiltered): 100
58 access ("visible" view): 100
59 access ("immutable" view): 100
60 unficalllog: [100]
61 cached value (unfiltered): 100
62 cached value ("visible" view): NOCACHE
63 cached value ("immutable" view): NOCACHE
64
65 = even if we clear the unfiltered cache
66 cached value (unfiltered): NOCACHE
67 cached value ("visible" view): NOCACHE
68 cached value ("immutable" view): NOCACHE
69 unficalllog: [100]
70 access ("visible" view): 100
71 unficalllog: [100, 100]
72 cached value (unfiltered): 100
73 cached value ("visible" view): NOCACHE
74 cached value ("immutable" view): NOCACHE
75 access ("immutable" view): 100
76 unficalllog: [100, 100]
77 cached value (unfiltered): 100
78 cached value ("visible" view): NOCACHE
79 cached value ("immutable" view): NOCACHE
80 access (unfiltered): 100
81 unficalllog: [100, 100]
82 cached value (unfiltered): 100
83 cached value ("visible" view): NOCACHE
84 cached value ("immutable" view): NOCACHE