localrepo: getting a value from the unfiltered caches should check if the attribute exists....
Wei, Elson -
r19635:b9b7dc26 default
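The change guards unfilteredpropertycache.__get__ with hasunfilteredcache, so a
value already computed and cached on the unfiltered repository is returned
directly instead of being recomputed through the descriptor. A minimal,
self-contained sketch of that caching pattern follows; it is not part of the
changeset, and the class and attribute names are illustrative only.

class propertycache(object):
    """non-data descriptor caching its result in the instance __dict__"""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        value = self.func(obj)
        obj.__dict__[self.name] = value   # later lookups bypass __get__
        return value

def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

class fakerepo(object):
    def unfiltered(self):
        return self          # a filtered view would return its base repo here
    @propertycache
    def expensive(self):
        print('computing expensive value')
        return 42

repo = fakerepo()
print(hasunfilteredcache(repo, 'expensive'))   # False, nothing cached yet
repo.expensive                                 # computes and caches the value
print(hasunfilteredcache(repo, 'expensive'))   # True, value found in vars(repo)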
@@ -1,2442 +1,2444 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on a repo is done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that applies to the unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 if hasunfilteredcache(repo, self.name):
43 return getattr(repo.unfiltered(), self.name)
42 44 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 45
44 46 class filteredpropertycache(propertycache):
45 47 """propertycache that must take filtering into account"""
46 48
47 49 def cachevalue(self, obj, value):
48 50 object.__setattr__(obj, self.name, value)
49 51
50 52
51 53 def hasunfilteredcache(repo, name):
52 54 """check if a repo has an unfilteredpropertycache value for <name>"""
53 55 return name in vars(repo.unfiltered())
54 56
55 57 def unfilteredmethod(orig):
56 58 """decorate a method that always needs to be run on the unfiltered version"""
57 59 def wrapper(repo, *args, **kwargs):
58 60 return orig(repo.unfiltered(), *args, **kwargs)
59 61 return wrapper
60 62
61 63 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 64 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 65
64 66 class localpeer(peer.peerrepository):
65 67 '''peer for a local repo; reflects only the most recent API'''
66 68
67 69 def __init__(self, repo, caps=MODERNCAPS):
68 70 peer.peerrepository.__init__(self)
69 71 self._repo = repo.filtered('served')
70 72 self.ui = repo.ui
71 73 self._caps = repo._restrictcapabilities(caps)
72 74 self.requirements = repo.requirements
73 75 self.supportedformats = repo.supportedformats
74 76
75 77 def close(self):
76 78 self._repo.close()
77 79
78 80 def _capabilities(self):
79 81 return self._caps
80 82
81 83 def local(self):
82 84 return self._repo
83 85
84 86 def canpush(self):
85 87 return True
86 88
87 89 def url(self):
88 90 return self._repo.url()
89 91
90 92 def lookup(self, key):
91 93 return self._repo.lookup(key)
92 94
93 95 def branchmap(self):
94 96 return self._repo.branchmap()
95 97
96 98 def heads(self):
97 99 return self._repo.heads()
98 100
99 101 def known(self, nodes):
100 102 return self._repo.known(nodes)
101 103
102 104 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
103 105 return self._repo.getbundle(source, heads=heads, common=common,
104 106 bundlecaps=None)
105 107
106 108 # TODO We might want to move the next two calls into legacypeer and add
107 109 # unbundle instead.
108 110
109 111 def lock(self):
110 112 return self._repo.lock()
111 113
112 114 def addchangegroup(self, cg, source, url):
113 115 return self._repo.addchangegroup(cg, source, url)
114 116
115 117 def pushkey(self, namespace, key, old, new):
116 118 return self._repo.pushkey(namespace, key, old, new)
117 119
118 120 def listkeys(self, namespace):
119 121 return self._repo.listkeys(namespace)
120 122
121 123 def debugwireargs(self, one, two, three=None, four=None, five=None):
122 124 '''used to test argument passing over the wire'''
123 125 return "%s %s %s %s %s" % (one, two, three, four, five)
124 126
125 127 class locallegacypeer(localpeer):
126 128 '''peer extension which implements legacy methods too; used for tests with
127 129 restricted capabilities'''
128 130
129 131 def __init__(self, repo):
130 132 localpeer.__init__(self, repo, caps=LEGACYCAPS)
131 133
132 134 def branches(self, nodes):
133 135 return self._repo.branches(nodes)
134 136
135 137 def between(self, pairs):
136 138 return self._repo.between(pairs)
137 139
138 140 def changegroup(self, basenodes, source):
139 141 return self._repo.changegroup(basenodes, source)
140 142
141 143 def changegroupsubset(self, bases, heads, source):
142 144 return self._repo.changegroupsubset(bases, heads, source)
143 145
144 146 class localrepository(object):
145 147
146 148 supportedformats = set(('revlogv1', 'generaldelta'))
147 149 supported = supportedformats | set(('store', 'fncache', 'shared',
148 150 'dotencode'))
149 151 openerreqs = set(('revlogv1', 'generaldelta'))
150 152 requirements = ['revlogv1']
151 153 filtername = None
152 154
153 155 def _baserequirements(self, create):
154 156 return self.requirements[:]
155 157
156 158 def __init__(self, baseui, path=None, create=False):
157 159 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
158 160 self.wopener = self.wvfs
159 161 self.root = self.wvfs.base
160 162 self.path = self.wvfs.join(".hg")
161 163 self.origroot = path
162 164 self.auditor = scmutil.pathauditor(self.root, self._checknested)
163 165 self.vfs = scmutil.vfs(self.path)
164 166 self.opener = self.vfs
165 167 self.baseui = baseui
166 168 self.ui = baseui.copy()
167 169 # A list of callbacks to shape the phase if no data were found.
168 170 # Callbacks are in the form: func(repo, roots) --> processed root.
169 171 # This list is to be filled by extensions during repo setup
170 172 self._phasedefaults = []
171 173 try:
172 174 self.ui.readconfig(self.join("hgrc"), self.root)
173 175 extensions.loadall(self.ui)
174 176 except IOError:
175 177 pass
176 178
177 179 if not self.vfs.isdir():
178 180 if create:
179 181 if not self.wvfs.exists():
180 182 self.wvfs.makedirs()
181 183 self.vfs.makedir(notindexed=True)
182 184 requirements = self._baserequirements(create)
183 185 if self.ui.configbool('format', 'usestore', True):
184 186 self.vfs.mkdir("store")
185 187 requirements.append("store")
186 188 if self.ui.configbool('format', 'usefncache', True):
187 189 requirements.append("fncache")
188 190 if self.ui.configbool('format', 'dotencode', True):
189 191 requirements.append('dotencode')
190 192 # create an invalid changelog
191 193 self.vfs.append(
192 194 "00changelog.i",
193 195 '\0\0\0\2' # represents revlogv2
194 196 ' dummy changelog to prevent using the old repo layout'
195 197 )
196 198 if self.ui.configbool('format', 'generaldelta', False):
197 199 requirements.append("generaldelta")
198 200 requirements = set(requirements)
199 201 else:
200 202 raise error.RepoError(_("repository %s not found") % path)
201 203 elif create:
202 204 raise error.RepoError(_("repository %s already exists") % path)
203 205 else:
204 206 try:
205 207 requirements = scmutil.readrequires(self.vfs, self.supported)
206 208 except IOError, inst:
207 209 if inst.errno != errno.ENOENT:
208 210 raise
209 211 requirements = set()
210 212
211 213 self.sharedpath = self.path
212 214 try:
213 215 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
214 216 realpath=True)
215 217 s = vfs.base
216 218 if not vfs.exists():
217 219 raise error.RepoError(
218 220 _('.hg/sharedpath points to nonexistent directory %s') % s)
219 221 self.sharedpath = s
220 222 except IOError, inst:
221 223 if inst.errno != errno.ENOENT:
222 224 raise
223 225
224 226 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
225 227 self.spath = self.store.path
226 228 self.svfs = self.store.vfs
227 229 self.sopener = self.svfs
228 230 self.sjoin = self.store.join
229 231 self.vfs.createmode = self.store.createmode
230 232 self._applyrequirements(requirements)
231 233 if create:
232 234 self._writerequirements()
233 235
234 236
235 237 self._branchcaches = {}
236 238 self.filterpats = {}
237 239 self._datafilters = {}
238 240 self._transref = self._lockref = self._wlockref = None
239 241
240 242 # A cache for various files under .hg/ that tracks file changes,
241 243 # (used by the filecache decorator)
242 244 #
243 245 # Maps a property name to its util.filecacheentry
244 246 self._filecache = {}
245 247
246 248 # holds sets of revisions to be filtered
247 249 # should be cleared when something might have changed the filter value:
248 250 # - new changesets,
249 251 # - phase change,
250 252 # - new obsolescence marker,
251 253 # - working directory parent change,
252 254 # - bookmark changes
253 255 self.filteredrevcache = {}
254 256
255 257 def close(self):
256 258 pass
257 259
258 260 def _restrictcapabilities(self, caps):
259 261 return caps
260 262
261 263 def _applyrequirements(self, requirements):
262 264 self.requirements = requirements
263 265 self.sopener.options = dict((r, 1) for r in requirements
264 266 if r in self.openerreqs)
265 267
266 268 def _writerequirements(self):
267 269 reqfile = self.opener("requires", "w")
268 270 for r in sorted(self.requirements):
269 271 reqfile.write("%s\n" % r)
270 272 reqfile.close()
271 273
272 274 def _checknested(self, path):
273 275 """Determine if path is a legal nested repository."""
274 276 if not path.startswith(self.root):
275 277 return False
276 278 subpath = path[len(self.root) + 1:]
277 279 normsubpath = util.pconvert(subpath)
278 280
279 281 # XXX: Checking against the current working copy is wrong in
280 282 # the sense that it can reject things like
281 283 #
282 284 # $ hg cat -r 10 sub/x.txt
283 285 #
284 286 # if sub/ is no longer a subrepository in the working copy
285 287 # parent revision.
286 288 #
287 289 # However, it can of course also allow things that would have
288 290 # been rejected before, such as the above cat command if sub/
289 291 # is a subrepository now, but was a normal directory before.
290 292 # The old path auditor would have rejected by mistake since it
291 293 # panics when it sees sub/.hg/.
292 294 #
293 295 # All in all, checking against the working copy seems sensible
294 296 # since we want to prevent access to nested repositories on
295 297 # the filesystem *now*.
296 298 ctx = self[None]
297 299 parts = util.splitpath(subpath)
298 300 while parts:
299 301 prefix = '/'.join(parts)
300 302 if prefix in ctx.substate:
301 303 if prefix == normsubpath:
302 304 return True
303 305 else:
304 306 sub = ctx.sub(prefix)
305 307 return sub.checknested(subpath[len(prefix) + 1:])
306 308 else:
307 309 parts.pop()
308 310 return False
309 311
310 312 def peer(self):
311 313 return localpeer(self) # not cached to avoid reference cycle
312 314
313 315 def unfiltered(self):
314 316 """Return unfiltered version of the repository
315 317
316 318 Intended to be overwritten by filtered repo."""
317 319 return self
318 320
319 321 def filtered(self, name):
320 322 """Return a filtered version of a repository"""
321 323 # build a new class with the mixin and the current class
322 324 # (possibly subclass of the repo)
323 325 class proxycls(repoview.repoview, self.unfiltered().__class__):
324 326 pass
325 327 return proxycls(self, name)
326 328
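    # Illustrative notes on the two views above: repo.unfiltered() exposes all
    # changesets, while a named filter such as repo.filtered('served') hides
    # revisions from clients; 'served' is the filter name this file itself uses
    # in localpeer.__init__ and in the branchmap.updatecache() calls below.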
327 329 @repofilecache('bookmarks')
328 330 def _bookmarks(self):
329 331 return bookmarks.bmstore(self)
330 332
331 333 @repofilecache('bookmarks.current')
332 334 def _bookmarkcurrent(self):
333 335 return bookmarks.readcurrent(self)
334 336
335 337 def bookmarkheads(self, bookmark):
336 338 name = bookmark.split('@', 1)[0]
337 339 heads = []
338 340 for mark, n in self._bookmarks.iteritems():
339 341 if mark.split('@', 1)[0] == name:
340 342 heads.append(n)
341 343 return heads
342 344
343 345 @storecache('phaseroots')
344 346 def _phasecache(self):
345 347 return phases.phasecache(self, self._phasedefaults)
346 348
347 349 @storecache('obsstore')
348 350 def obsstore(self):
349 351 store = obsolete.obsstore(self.sopener)
350 352 if store and not obsolete._enabled:
351 353 # message is rare enough to not be translated
352 354 msg = 'obsolete feature not enabled but %i markers found!\n'
353 355 self.ui.warn(msg % len(list(store)))
354 356 return store
355 357
356 358 @storecache('00changelog.i')
357 359 def changelog(self):
358 360 c = changelog.changelog(self.sopener)
359 361 if 'HG_PENDING' in os.environ:
360 362 p = os.environ['HG_PENDING']
361 363 if p.startswith(self.root):
362 364 c.readpending('00changelog.i.a')
363 365 return c
364 366
365 367 @storecache('00manifest.i')
366 368 def manifest(self):
367 369 return manifest.manifest(self.sopener)
368 370
369 371 @repofilecache('dirstate')
370 372 def dirstate(self):
371 373 warned = [0]
372 374 def validate(node):
373 375 try:
374 376 self.changelog.rev(node)
375 377 return node
376 378 except error.LookupError:
377 379 if not warned[0]:
378 380 warned[0] = True
379 381 self.ui.warn(_("warning: ignoring unknown"
380 382 " working parent %s!\n") % short(node))
381 383 return nullid
382 384
383 385 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
384 386
385 387 def __getitem__(self, changeid):
386 388 if changeid is None:
387 389 return context.workingctx(self)
388 390 return context.changectx(self, changeid)
389 391
390 392 def __contains__(self, changeid):
391 393 try:
392 394 return bool(self.lookup(changeid))
393 395 except error.RepoLookupError:
394 396 return False
395 397
396 398 def __nonzero__(self):
397 399 return True
398 400
399 401 def __len__(self):
400 402 return len(self.changelog)
401 403
402 404 def __iter__(self):
403 405 return iter(self.changelog)
404 406
405 407 def revs(self, expr, *args):
406 408 '''Return a list of revisions matching the given revset'''
407 409 expr = revset.formatspec(expr, *args)
408 410 m = revset.match(None, expr)
409 411 return [r for r in m(self, list(self))]
410 412
411 413 def set(self, expr, *args):
412 414 '''
413 415 Yield a context for each matching revision, after doing arg
414 416 replacement via revset.formatspec
415 417 '''
416 418 for r in self.revs(expr, *args):
417 419 yield self[r]
418 420
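    # Hypothetical revset queries using the two helpers above (see
    # `hg help revsets` for the full syntax):
    #   repo.revs('branch(%s) and not public()', 'default')
    #   for ctx in repo.set('%d::%d', 10, 20):
    #       ...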
419 421 def url(self):
420 422 return 'file:' + self.root
421 423
422 424 def hook(self, name, throw=False, **args):
423 425 return hook.hook(self.ui, self, name, throw, **args)
424 426
425 427 @unfilteredmethod
426 428 def _tag(self, names, node, message, local, user, date, extra={}):
427 429 if isinstance(names, str):
428 430 names = (names,)
429 431
430 432 branches = self.branchmap()
431 433 for name in names:
432 434 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 435 local=local)
434 436 if name in branches:
435 437 self.ui.warn(_("warning: tag %s conflicts with existing"
436 438 " branch name\n") % name)
437 439
438 440 def writetags(fp, names, munge, prevtags):
439 441 fp.seek(0, 2)
440 442 if prevtags and prevtags[-1] != '\n':
441 443 fp.write('\n')
442 444 for name in names:
443 445 m = munge and munge(name) or name
444 446 if (self._tagscache.tagtypes and
445 447 name in self._tagscache.tagtypes):
446 448 old = self.tags().get(name, nullid)
447 449 fp.write('%s %s\n' % (hex(old), m))
448 450 fp.write('%s %s\n' % (hex(node), m))
449 451 fp.close()
450 452
451 453 prevtags = ''
452 454 if local:
453 455 try:
454 456 fp = self.opener('localtags', 'r+')
455 457 except IOError:
456 458 fp = self.opener('localtags', 'a')
457 459 else:
458 460 prevtags = fp.read()
459 461
460 462 # local tags are stored in the current charset
461 463 writetags(fp, names, None, prevtags)
462 464 for name in names:
463 465 self.hook('tag', node=hex(node), tag=name, local=local)
464 466 return
465 467
466 468 try:
467 469 fp = self.wfile('.hgtags', 'rb+')
468 470 except IOError, e:
469 471 if e.errno != errno.ENOENT:
470 472 raise
471 473 fp = self.wfile('.hgtags', 'ab')
472 474 else:
473 475 prevtags = fp.read()
474 476
475 477 # committed tags are stored in UTF-8
476 478 writetags(fp, names, encoding.fromlocal, prevtags)
477 479
478 480 fp.close()
479 481
480 482 self.invalidatecaches()
481 483
482 484 if '.hgtags' not in self.dirstate:
483 485 self[None].add(['.hgtags'])
484 486
485 487 m = matchmod.exact(self.root, '', ['.hgtags'])
486 488 tagnode = self.commit(message, user, date, extra=extra, match=m)
487 489
488 490 for name in names:
489 491 self.hook('tag', node=hex(node), tag=name, local=local)
490 492
491 493 return tagnode
492 494
493 495 def tag(self, names, node, message, local, user, date):
494 496 '''tag a revision with one or more symbolic names.
495 497
496 498 names is a list of strings or, when adding a single tag, names may be a
497 499 string.
498 500
499 501 if local is True, the tags are stored in a per-repository file.
500 502 otherwise, they are stored in the .hgtags file, and a new
501 503 changeset is committed with the change.
502 504
503 505 keyword arguments:
504 506
505 507 local: whether to store tags in non-version-controlled file
506 508 (default False)
507 509
508 510 message: commit message to use if committing
509 511
510 512 user: name of user to use if committing
511 513
512 514 date: date tuple to use if committing'''
513 515
514 516 if not local:
515 517 for x in self.status()[:5]:
516 518 if '.hgtags' in x:
517 519 raise util.Abort(_('working copy of .hgtags is changed '
518 520 '(please commit .hgtags manually)'))
519 521
520 522 self.tags() # instantiate the cache
521 523 self._tag(names, node, message, local, user, date)
522 524
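    # Illustrative call matching the docstring above (all arguments are
    # hypothetical):
    #   repo.tag(['v1.0'], node, 'Added tag v1.0', False,
    #            'Alice <alice@example.com>', None)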
523 525 @filteredpropertycache
524 526 def _tagscache(self):
525 527 '''Returns a tagscache object that contains various tags related
526 528 caches.'''
527 529
528 530 # This simplifies its cache management by having one decorated
529 531 # function (this one) and the rest simply fetch things from it.
530 532 class tagscache(object):
531 533 def __init__(self):
532 534 # These two define the set of tags for this repository. tags
533 535 # maps tag name to node; tagtypes maps tag name to 'global' or
534 536 # 'local'. (Global tags are defined by .hgtags across all
535 537 # heads, and local tags are defined in .hg/localtags.)
536 538 # They constitute the in-memory cache of tags.
537 539 self.tags = self.tagtypes = None
538 540
539 541 self.nodetagscache = self.tagslist = None
540 542
541 543 cache = tagscache()
542 544 cache.tags, cache.tagtypes = self._findtags()
543 545
544 546 return cache
545 547
546 548 def tags(self):
547 549 '''return a mapping of tag to node'''
548 550 t = {}
549 551 if self.changelog.filteredrevs:
550 552 tags, tt = self._findtags()
551 553 else:
552 554 tags = self._tagscache.tags
553 555 for k, v in tags.iteritems():
554 556 try:
555 557 # ignore tags to unknown nodes
556 558 self.changelog.rev(v)
557 559 t[k] = v
558 560 except (error.LookupError, ValueError):
559 561 pass
560 562 return t
561 563
562 564 def _findtags(self):
563 565 '''Do the hard work of finding tags. Return a pair of dicts
564 566 (tags, tagtypes) where tags maps tag name to node, and tagtypes
565 567 maps tag name to a string like \'global\' or \'local\'.
566 568 Subclasses or extensions are free to add their own tags, but
567 569 should be aware that the returned dicts will be retained for the
568 570 duration of the localrepo object.'''
569 571
570 572 # XXX what tagtype should subclasses/extensions use? Currently
571 573 # mq and bookmarks add tags, but do not set the tagtype at all.
572 574 # Should each extension invent its own tag type? Should there
573 575 # be one tagtype for all such "virtual" tags? Or is the status
574 576 # quo fine?
575 577
576 578 alltags = {} # map tag name to (node, hist)
577 579 tagtypes = {}
578 580
579 581 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
580 582 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
581 583
582 584 # Build the return dicts. Have to re-encode tag names because
583 585 # the tags module always uses UTF-8 (in order not to lose info
584 586 # writing to the cache), but the rest of Mercurial wants them in
585 587 # local encoding.
586 588 tags = {}
587 589 for (name, (node, hist)) in alltags.iteritems():
588 590 if node != nullid:
589 591 tags[encoding.tolocal(name)] = node
590 592 tags['tip'] = self.changelog.tip()
591 593 tagtypes = dict([(encoding.tolocal(name), value)
592 594 for (name, value) in tagtypes.iteritems()])
593 595 return (tags, tagtypes)
594 596
595 597 def tagtype(self, tagname):
596 598 '''
597 599 return the type of the given tag. result can be:
598 600
599 601 'local' : a local tag
600 602 'global' : a global tag
601 603 None : tag does not exist
602 604 '''
603 605
604 606 return self._tagscache.tagtypes.get(tagname)
605 607
606 608 def tagslist(self):
607 609 '''return a list of tags ordered by revision'''
608 610 if not self._tagscache.tagslist:
609 611 l = []
610 612 for t, n in self.tags().iteritems():
611 613 r = self.changelog.rev(n)
612 614 l.append((r, t, n))
613 615 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
614 616
615 617 return self._tagscache.tagslist
616 618
617 619 def nodetags(self, node):
618 620 '''return the tags associated with a node'''
619 621 if not self._tagscache.nodetagscache:
620 622 nodetagscache = {}
621 623 for t, n in self._tagscache.tags.iteritems():
622 624 nodetagscache.setdefault(n, []).append(t)
623 625 for tags in nodetagscache.itervalues():
624 626 tags.sort()
625 627 self._tagscache.nodetagscache = nodetagscache
626 628 return self._tagscache.nodetagscache.get(node, [])
627 629
628 630 def nodebookmarks(self, node):
629 631 marks = []
630 632 for bookmark, n in self._bookmarks.iteritems():
631 633 if n == node:
632 634 marks.append(bookmark)
633 635 return sorted(marks)
634 636
635 637 def branchmap(self):
636 638 '''returns a dictionary {branch: [branchheads]}'''
637 639 branchmap.updatecache(self)
638 640 return self._branchcaches[self.filtername]
639 641
640 642
641 643 def _branchtip(self, heads):
642 644 '''return the tipmost branch head in heads'''
643 645 tip = heads[-1]
644 646 for h in reversed(heads):
645 647 if not self[h].closesbranch():
646 648 tip = h
647 649 break
648 650 return tip
649 651
650 652 def branchtip(self, branch):
651 653 '''return the tip node for a given branch'''
652 654 if branch not in self.branchmap():
653 655 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
654 656 return self._branchtip(self.branchmap()[branch])
655 657
656 658 def branchtags(self):
657 659 '''return a dict where branch names map to the tipmost head of
658 660 the branch, open heads come before closed'''
659 661 bt = {}
660 662 for bn, heads in self.branchmap().iteritems():
661 663 bt[bn] = self._branchtip(heads)
662 664 return bt
663 665
664 666 def lookup(self, key):
665 667 return self[key].node()
666 668
667 669 def lookupbranch(self, key, remote=None):
668 670 repo = remote or self
669 671 if key in repo.branchmap():
670 672 return key
671 673
672 674 repo = (remote and remote.local()) and remote or self
673 675 return repo[key].branch()
674 676
675 677 def known(self, nodes):
676 678 nm = self.changelog.nodemap
677 679 pc = self._phasecache
678 680 result = []
679 681 for n in nodes:
680 682 r = nm.get(n)
681 683 resp = not (r is None or pc.phase(self, r) >= phases.secret)
682 684 result.append(resp)
683 685 return result
684 686
685 687 def local(self):
686 688 return self
687 689
688 690 def cancopy(self):
689 691 return self.local() # so statichttprepo's override of local() works
690 692
691 693 def join(self, f):
692 694 return os.path.join(self.path, f)
693 695
694 696 def wjoin(self, f):
695 697 return os.path.join(self.root, f)
696 698
697 699 def file(self, f):
698 700 if f[0] == '/':
699 701 f = f[1:]
700 702 return filelog.filelog(self.sopener, f)
701 703
702 704 def changectx(self, changeid):
703 705 return self[changeid]
704 706
705 707 def parents(self, changeid=None):
706 708 '''get list of changectxs for parents of changeid'''
707 709 return self[changeid].parents()
708 710
709 711 def setparents(self, p1, p2=nullid):
710 712 copies = self.dirstate.setparents(p1, p2)
711 713 pctx = self[p1]
712 714 if copies:
713 715 # Adjust copy records, the dirstate cannot do it, it
714 716 # requires access to parents manifests. Preserve them
715 717 # only for entries added to first parent.
716 718 for f in copies:
717 719 if f not in pctx and copies[f] in pctx:
718 720 self.dirstate.copy(copies[f], f)
719 721 if p2 == nullid:
720 722 for f, s in sorted(self.dirstate.copies().items()):
721 723 if f not in pctx and s not in pctx:
722 724 self.dirstate.copy(None, f)
723 725
724 726 def filectx(self, path, changeid=None, fileid=None):
725 727 """changeid can be a changeset revision, node, or tag.
726 728 fileid can be a file revision or node."""
727 729 return context.filectx(self, path, changeid, fileid)
728 730
729 731 def getcwd(self):
730 732 return self.dirstate.getcwd()
731 733
732 734 def pathto(self, f, cwd=None):
733 735 return self.dirstate.pathto(f, cwd)
734 736
735 737 def wfile(self, f, mode='r'):
736 738 return self.wopener(f, mode)
737 739
738 740 def _link(self, f):
739 741 return self.wvfs.islink(f)
740 742
741 743 def _loadfilter(self, filter):
742 744 if filter not in self.filterpats:
743 745 l = []
744 746 for pat, cmd in self.ui.configitems(filter):
745 747 if cmd == '!':
746 748 continue
747 749 mf = matchmod.match(self.root, '', [pat])
748 750 fn = None
749 751 params = cmd
750 752 for name, filterfn in self._datafilters.iteritems():
751 753 if cmd.startswith(name):
752 754 fn = filterfn
753 755 params = cmd[len(name):].lstrip()
754 756 break
755 757 if not fn:
756 758 fn = lambda s, c, **kwargs: util.filter(s, c)
757 759 # Wrap old filters not supporting keyword arguments
758 760 if not inspect.getargspec(fn)[2]:
759 761 oldfn = fn
760 762 fn = lambda s, c, **kwargs: oldfn(s, c)
761 763 l.append((mf, fn, params))
762 764 self.filterpats[filter] = l
763 765 return self.filterpats[filter]
764 766
765 767 def _filter(self, filterpats, filename, data):
766 768 for mf, fn, cmd in filterpats:
767 769 if mf(filename):
768 770 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
769 771 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
770 772 break
771 773
772 774 return data
773 775
774 776 @unfilteredpropertycache
775 777 def _encodefilterpats(self):
776 778 return self._loadfilter('encode')
777 779
778 780 @unfilteredpropertycache
779 781 def _decodefilterpats(self):
780 782 return self._loadfilter('decode')
781 783
782 784 def adddatafilter(self, name, filter):
783 785 self._datafilters[name] = filter
784 786
785 787 def wread(self, filename):
786 788 if self._link(filename):
787 789 data = self.wvfs.readlink(filename)
788 790 else:
789 791 data = self.wopener.read(filename)
790 792 return self._filter(self._encodefilterpats, filename, data)
791 793
792 794 def wwrite(self, filename, data, flags):
793 795 data = self._filter(self._decodefilterpats, filename, data)
794 796 if 'l' in flags:
795 797 self.wopener.symlink(data, filename)
796 798 else:
797 799 self.wopener.write(filename, data)
798 800 if 'x' in flags:
799 801 self.wvfs.setflags(filename, False, True)
800 802
801 803 def wwritedata(self, filename, data):
802 804 return self._filter(self._decodefilterpats, filename, data)
803 805
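    # The filter patterns above come from the [encode] and [decode] sections of
    # hgrc: wread() runs encode filters on data read from the working directory
    # and wwrite()/wwritedata() run decode filters on data written back to it.
    # A hypothetical configuration (the commands are examples only):
    #   [encode]
    #   **.gz = gunzip
    #   [decode]
    #   **.gz = gzip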
804 806 def transaction(self, desc):
805 807 tr = self._transref and self._transref() or None
806 808 if tr and tr.running():
807 809 return tr.nest()
808 810
809 811 # abort here if the journal already exists
810 812 if self.svfs.exists("journal"):
811 813 raise error.RepoError(
812 814 _("abandoned transaction found - run hg recover"))
813 815
814 816 self._writejournal(desc)
815 817 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
816 818
817 819 tr = transaction.transaction(self.ui.warn, self.sopener,
818 820 self.sjoin("journal"),
819 821 aftertrans(renames),
820 822 self.store.createmode)
821 823 self._transref = weakref.ref(tr)
822 824 return tr
823 825
824 826 def _journalfiles(self):
825 827 return ((self.svfs, 'journal'),
826 828 (self.vfs, 'journal.dirstate'),
827 829 (self.vfs, 'journal.branch'),
828 830 (self.vfs, 'journal.desc'),
829 831 (self.vfs, 'journal.bookmarks'),
830 832 (self.svfs, 'journal.phaseroots'))
831 833
832 834 def undofiles(self):
833 835 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
834 836
835 837 def _writejournal(self, desc):
836 838 self.opener.write("journal.dirstate",
837 839 self.opener.tryread("dirstate"))
838 840 self.opener.write("journal.branch",
839 841 encoding.fromlocal(self.dirstate.branch()))
840 842 self.opener.write("journal.desc",
841 843 "%d\n%s\n" % (len(self), desc))
842 844 self.opener.write("journal.bookmarks",
843 845 self.opener.tryread("bookmarks"))
844 846 self.sopener.write("journal.phaseroots",
845 847 self.sopener.tryread("phaseroots"))
846 848
847 849 def recover(self):
848 850 lock = self.lock()
849 851 try:
850 852 if self.svfs.exists("journal"):
851 853 self.ui.status(_("rolling back interrupted transaction\n"))
852 854 transaction.rollback(self.sopener, self.sjoin("journal"),
853 855 self.ui.warn)
854 856 self.invalidate()
855 857 return True
856 858 else:
857 859 self.ui.warn(_("no interrupted transaction available\n"))
858 860 return False
859 861 finally:
860 862 lock.release()
861 863
862 864 def rollback(self, dryrun=False, force=False):
863 865 wlock = lock = None
864 866 try:
865 867 wlock = self.wlock()
866 868 lock = self.lock()
867 869 if self.svfs.exists("undo"):
868 870 return self._rollback(dryrun, force)
869 871 else:
870 872 self.ui.warn(_("no rollback information available\n"))
871 873 return 1
872 874 finally:
873 875 release(lock, wlock)
874 876
875 877 @unfilteredmethod # Until we get smarter cache management
876 878 def _rollback(self, dryrun, force):
877 879 ui = self.ui
878 880 try:
879 881 args = self.opener.read('undo.desc').splitlines()
880 882 (oldlen, desc, detail) = (int(args[0]), args[1], None)
881 883 if len(args) >= 3:
882 884 detail = args[2]
883 885 oldtip = oldlen - 1
884 886
885 887 if detail and ui.verbose:
886 888 msg = (_('repository tip rolled back to revision %s'
887 889 ' (undo %s: %s)\n')
888 890 % (oldtip, desc, detail))
889 891 else:
890 892 msg = (_('repository tip rolled back to revision %s'
891 893 ' (undo %s)\n')
892 894 % (oldtip, desc))
893 895 except IOError:
894 896 msg = _('rolling back unknown transaction\n')
895 897 desc = None
896 898
897 899 if not force and self['.'] != self['tip'] and desc == 'commit':
898 900 raise util.Abort(
899 901 _('rollback of last commit while not checked out '
900 902 'may lose data'), hint=_('use -f to force'))
901 903
902 904 ui.status(msg)
903 905 if dryrun:
904 906 return 0
905 907
906 908 parents = self.dirstate.parents()
907 909 self.destroying()
908 910 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
909 911 if self.vfs.exists('undo.bookmarks'):
910 912 self.vfs.rename('undo.bookmarks', 'bookmarks')
911 913 if self.svfs.exists('undo.phaseroots'):
912 914 self.svfs.rename('undo.phaseroots', 'phaseroots')
913 915 self.invalidate()
914 916
915 917 parentgone = (parents[0] not in self.changelog.nodemap or
916 918 parents[1] not in self.changelog.nodemap)
917 919 if parentgone:
918 920 self.vfs.rename('undo.dirstate', 'dirstate')
919 921 try:
920 922 branch = self.opener.read('undo.branch')
921 923 self.dirstate.setbranch(encoding.tolocal(branch))
922 924 except IOError:
923 925 ui.warn(_('named branch could not be reset: '
924 926 'current branch is still \'%s\'\n')
925 927 % self.dirstate.branch())
926 928
927 929 self.dirstate.invalidate()
928 930 parents = tuple([p.rev() for p in self.parents()])
929 931 if len(parents) > 1:
930 932 ui.status(_('working directory now based on '
931 933 'revisions %d and %d\n') % parents)
932 934 else:
933 935 ui.status(_('working directory now based on '
934 936 'revision %d\n') % parents)
935 937 # TODO: if we know which new heads may result from this rollback, pass
936 938 # them to destroy(), which will prevent the branchhead cache from being
937 939 # invalidated.
938 940 self.destroyed()
939 941 return 0
940 942
941 943 def invalidatecaches(self):
942 944
943 945 if '_tagscache' in vars(self):
944 946 # can't use delattr on proxy
945 947 del self.__dict__['_tagscache']
946 948
947 949 self.unfiltered()._branchcaches.clear()
948 950 self.invalidatevolatilesets()
949 951
950 952 def invalidatevolatilesets(self):
951 953 self.filteredrevcache.clear()
952 954 obsolete.clearobscaches(self)
953 955
954 956 def invalidatedirstate(self):
955 957 '''Invalidates the dirstate, causing the next call to dirstate
956 958 to check if it was modified since the last time it was read,
957 959 rereading it if it has.
958 960
959 961 This is different from dirstate.invalidate() in that it doesn't always
960 962 reread the dirstate. Use dirstate.invalidate() if you want to
961 963 explicitly read the dirstate again (i.e. restoring it to a previous
962 964 known good state).'''
963 965 if hasunfilteredcache(self, 'dirstate'):
964 966 for k in self.dirstate._filecache:
965 967 try:
966 968 delattr(self.dirstate, k)
967 969 except AttributeError:
968 970 pass
969 971 delattr(self.unfiltered(), 'dirstate')
970 972
971 973 def invalidate(self):
972 974 unfiltered = self.unfiltered() # all file caches are stored unfiltered
973 975 for k in self._filecache:
974 976 # dirstate is invalidated separately in invalidatedirstate()
975 977 if k == 'dirstate':
976 978 continue
977 979
978 980 try:
979 981 delattr(unfiltered, k)
980 982 except AttributeError:
981 983 pass
982 984 self.invalidatecaches()
983 985
984 986 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
985 987 try:
986 988 l = lock.lock(lockname, 0, releasefn, desc=desc)
987 989 except error.LockHeld, inst:
988 990 if not wait:
989 991 raise
990 992 self.ui.warn(_("waiting for lock on %s held by %r\n") %
991 993 (desc, inst.locker))
992 994 # default to 600 seconds timeout
993 995 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
994 996 releasefn, desc=desc)
995 997 if acquirefn:
996 998 acquirefn()
997 999 return l
998 1000
999 1001 def _afterlock(self, callback):
1000 1002 """add a callback to the current repository lock.
1001 1003
1002 1004 The callback will be executed on lock release."""
1003 1005 l = self._lockref and self._lockref()
1004 1006 if l:
1005 1007 l.postrelease.append(callback)
1006 1008 else:
1007 1009 callback()
1008 1010
1009 1011 def lock(self, wait=True):
1010 1012 '''Lock the repository store (.hg/store) and return a weak reference
1011 1013 to the lock. Use this before modifying the store (e.g. committing or
1012 1014 stripping). If you are opening a transaction, get a lock as well.'''
1013 1015 l = self._lockref and self._lockref()
1014 1016 if l is not None and l.held:
1015 1017 l.lock()
1016 1018 return l
1017 1019
1018 1020 def unlock():
1019 1021 self.store.write()
1020 1022 if hasunfilteredcache(self, '_phasecache'):
1021 1023 self._phasecache.write()
1022 1024 for k, ce in self._filecache.items():
1023 1025 if k == 'dirstate' or k not in self.__dict__:
1024 1026 continue
1025 1027 ce.refresh()
1026 1028
1027 1029 l = self._lock(self.sjoin("lock"), wait, unlock,
1028 1030 self.invalidate, _('repository %s') % self.origroot)
1029 1031 self._lockref = weakref.ref(l)
1030 1032 return l
1031 1033
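    # Typical store-modification pattern implied by the docstring above and
    # used by commitctx() later in this file:
    #   tr = None
    #   lock = repo.lock()
    #   try:
    #       tr = repo.transaction('some-operation')
    #       # ... modify the store ...
    #       tr.close()
    #   finally:
    #       if tr:
    #           tr.release()
    #       lock.release()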
1032 1034 def wlock(self, wait=True):
1033 1035 '''Lock the non-store parts of the repository (everything under
1034 1036 .hg except .hg/store) and return a weak reference to the lock.
1035 1037 Use this before modifying files in .hg.'''
1036 1038 l = self._wlockref and self._wlockref()
1037 1039 if l is not None and l.held:
1038 1040 l.lock()
1039 1041 return l
1040 1042
1041 1043 def unlock():
1042 1044 self.dirstate.write()
1043 1045 self._filecache['dirstate'].refresh()
1044 1046
1045 1047 l = self._lock(self.join("wlock"), wait, unlock,
1046 1048 self.invalidatedirstate, _('working directory of %s') %
1047 1049 self.origroot)
1048 1050 self._wlockref = weakref.ref(l)
1049 1051 return l
1050 1052
1051 1053 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1052 1054 """
1053 1055 commit an individual file as part of a larger transaction
1054 1056 """
1055 1057
1056 1058 fname = fctx.path()
1057 1059 text = fctx.data()
1058 1060 flog = self.file(fname)
1059 1061 fparent1 = manifest1.get(fname, nullid)
1060 1062 fparent2 = fparent2o = manifest2.get(fname, nullid)
1061 1063
1062 1064 meta = {}
1063 1065 copy = fctx.renamed()
1064 1066 if copy and copy[0] != fname:
1065 1067 # Mark the new revision of this file as a copy of another
1066 1068 # file. This copy data will effectively act as a parent
1067 1069 # of this new revision. If this is a merge, the first
1068 1070 # parent will be the nullid (meaning "look up the copy data")
1069 1071 # and the second one will be the other parent. For example:
1070 1072 #
1071 1073 # 0 --- 1 --- 3 rev1 changes file foo
1072 1074 # \ / rev2 renames foo to bar and changes it
1073 1075 # \- 2 -/ rev3 should have bar with all changes and
1074 1076 # should record that bar descends from
1075 1077 # bar in rev2 and foo in rev1
1076 1078 #
1077 1079 # this allows this merge to succeed:
1078 1080 #
1079 1081 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1080 1082 # \ / merging rev3 and rev4 should use bar@rev2
1081 1083 # \- 2 --- 4 as the merge base
1082 1084 #
1083 1085
1084 1086 cfname = copy[0]
1085 1087 crev = manifest1.get(cfname)
1086 1088 newfparent = fparent2
1087 1089
1088 1090 if manifest2: # branch merge
1089 1091 if fparent2 == nullid or crev is None: # copied on remote side
1090 1092 if cfname in manifest2:
1091 1093 crev = manifest2[cfname]
1092 1094 newfparent = fparent1
1093 1095
1094 1096 # find source in nearest ancestor if we've lost track
1095 1097 if not crev:
1096 1098 self.ui.debug(" %s: searching for copy revision for %s\n" %
1097 1099 (fname, cfname))
1098 1100 for ancestor in self[None].ancestors():
1099 1101 if cfname in ancestor:
1100 1102 crev = ancestor[cfname].filenode()
1101 1103 break
1102 1104
1103 1105 if crev:
1104 1106 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1105 1107 meta["copy"] = cfname
1106 1108 meta["copyrev"] = hex(crev)
1107 1109 fparent1, fparent2 = nullid, newfparent
1108 1110 else:
1109 1111 self.ui.warn(_("warning: can't find ancestor for '%s' "
1110 1112 "copied from '%s'!\n") % (fname, cfname))
1111 1113
1112 1114 elif fparent2 != nullid:
1113 1115 # is one parent an ancestor of the other?
1114 1116 fparentancestor = flog.ancestor(fparent1, fparent2)
1115 1117 if fparentancestor == fparent1:
1116 1118 fparent1, fparent2 = fparent2, nullid
1117 1119 elif fparentancestor == fparent2:
1118 1120 fparent2 = nullid
1119 1121
1120 1122 # is the file changed?
1121 1123 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1122 1124 changelist.append(fname)
1123 1125 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1124 1126
1125 1127 # are just the flags changed during merge?
1126 1128 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1127 1129 changelist.append(fname)
1128 1130
1129 1131 return fparent1
1130 1132
1131 1133 @unfilteredmethod
1132 1134 def commit(self, text="", user=None, date=None, match=None, force=False,
1133 1135 editor=False, extra={}):
1134 1136 """Add a new revision to current repository.
1135 1137
1136 1138 Revision information is gathered from the working directory,
1137 1139 match can be used to filter the committed files. If editor is
1138 1140 supplied, it is called to get a commit message.
1139 1141 """
1140 1142
1141 1143 def fail(f, msg):
1142 1144 raise util.Abort('%s: %s' % (f, msg))
1143 1145
1144 1146 if not match:
1145 1147 match = matchmod.always(self.root, '')
1146 1148
1147 1149 if not force:
1148 1150 vdirs = []
1149 1151 match.explicitdir = vdirs.append
1150 1152 match.bad = fail
1151 1153
1152 1154 wlock = self.wlock()
1153 1155 try:
1154 1156 wctx = self[None]
1155 1157 merge = len(wctx.parents()) > 1
1156 1158
1157 1159 if (not force and merge and match and
1158 1160 (match.files() or match.anypats())):
1159 1161 raise util.Abort(_('cannot partially commit a merge '
1160 1162 '(do not specify files or patterns)'))
1161 1163
1162 1164 changes = self.status(match=match, clean=force)
1163 1165 if force:
1164 1166 changes[0].extend(changes[6]) # mq may commit unchanged files
1165 1167
1166 1168 # check subrepos
1167 1169 subs = []
1168 1170 commitsubs = set()
1169 1171 newstate = wctx.substate.copy()
1170 1172 # only manage subrepos and .hgsubstate if .hgsub is present
1171 1173 if '.hgsub' in wctx:
1172 1174 # we'll decide whether to track this ourselves, thanks
1173 1175 if '.hgsubstate' in changes[0]:
1174 1176 changes[0].remove('.hgsubstate')
1175 1177 if '.hgsubstate' in changes[2]:
1176 1178 changes[2].remove('.hgsubstate')
1177 1179
1178 1180 # compare current state to last committed state
1179 1181 # build new substate based on last committed state
1180 1182 oldstate = wctx.p1().substate
1181 1183 for s in sorted(newstate.keys()):
1182 1184 if not match(s):
1183 1185 # ignore working copy, use old state if present
1184 1186 if s in oldstate:
1185 1187 newstate[s] = oldstate[s]
1186 1188 continue
1187 1189 if not force:
1188 1190 raise util.Abort(
1189 1191 _("commit with new subrepo %s excluded") % s)
1190 1192 if wctx.sub(s).dirty(True):
1191 1193 if not self.ui.configbool('ui', 'commitsubrepos'):
1192 1194 raise util.Abort(
1193 1195 _("uncommitted changes in subrepo %s") % s,
1194 1196 hint=_("use --subrepos for recursive commit"))
1195 1197 subs.append(s)
1196 1198 commitsubs.add(s)
1197 1199 else:
1198 1200 bs = wctx.sub(s).basestate()
1199 1201 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1200 1202 if oldstate.get(s, (None, None, None))[1] != bs:
1201 1203 subs.append(s)
1202 1204
1203 1205 # check for removed subrepos
1204 1206 for p in wctx.parents():
1205 1207 r = [s for s in p.substate if s not in newstate]
1206 1208 subs += [s for s in r if match(s)]
1207 1209 if subs:
1208 1210 if (not match('.hgsub') and
1209 1211 '.hgsub' in (wctx.modified() + wctx.added())):
1210 1212 raise util.Abort(
1211 1213 _("can't commit subrepos without .hgsub"))
1212 1214 changes[0].insert(0, '.hgsubstate')
1213 1215
1214 1216 elif '.hgsub' in changes[2]:
1215 1217 # clean up .hgsubstate when .hgsub is removed
1216 1218 if ('.hgsubstate' in wctx and
1217 1219 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1218 1220 changes[2].insert(0, '.hgsubstate')
1219 1221
1220 1222 # make sure all explicit patterns are matched
1221 1223 if not force and match.files():
1222 1224 matched = set(changes[0] + changes[1] + changes[2])
1223 1225
1224 1226 for f in match.files():
1225 1227 f = self.dirstate.normalize(f)
1226 1228 if f == '.' or f in matched or f in wctx.substate:
1227 1229 continue
1228 1230 if f in changes[3]: # missing
1229 1231 fail(f, _('file not found!'))
1230 1232 if f in vdirs: # visited directory
1231 1233 d = f + '/'
1232 1234 for mf in matched:
1233 1235 if mf.startswith(d):
1234 1236 break
1235 1237 else:
1236 1238 fail(f, _("no match under directory!"))
1237 1239 elif f not in self.dirstate:
1238 1240 fail(f, _("file not tracked!"))
1239 1241
1240 1242 cctx = context.workingctx(self, text, user, date, extra, changes)
1241 1243
1242 1244 if (not force and not extra.get("close") and not merge
1243 1245 and not cctx.files()
1244 1246 and wctx.branch() == wctx.p1().branch()):
1245 1247 return None
1246 1248
1247 1249 if merge and cctx.deleted():
1248 1250 raise util.Abort(_("cannot commit merge with missing files"))
1249 1251
1250 1252 ms = mergemod.mergestate(self)
1251 1253 for f in changes[0]:
1252 1254 if f in ms and ms[f] == 'u':
1253 1255 raise util.Abort(_("unresolved merge conflicts "
1254 1256 "(see hg help resolve)"))
1255 1257
1256 1258 if editor:
1257 1259 cctx._text = editor(self, cctx, subs)
1258 1260 edited = (text != cctx._text)
1259 1261
1260 1262 # commit subs and write new state
1261 1263 if subs:
1262 1264 for s in sorted(commitsubs):
1263 1265 sub = wctx.sub(s)
1264 1266 self.ui.status(_('committing subrepository %s\n') %
1265 1267 subrepo.subrelpath(sub))
1266 1268 sr = sub.commit(cctx._text, user, date)
1267 1269 newstate[s] = (newstate[s][0], sr)
1268 1270 subrepo.writestate(self, newstate)
1269 1271
1270 1272 # Save commit message in case this transaction gets rolled back
1271 1273 # (e.g. by a pretxncommit hook). Leave the content alone on
1272 1274 # the assumption that the user will use the same editor again.
1273 1275 msgfn = self.savecommitmessage(cctx._text)
1274 1276
1275 1277 p1, p2 = self.dirstate.parents()
1276 1278 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1277 1279 try:
1278 1280 self.hook("precommit", throw=True, parent1=hookp1,
1279 1281 parent2=hookp2)
1280 1282 ret = self.commitctx(cctx, True)
1281 1283 except: # re-raises
1282 1284 if edited:
1283 1285 self.ui.write(
1284 1286 _('note: commit message saved in %s\n') % msgfn)
1285 1287 raise
1286 1288
1287 1289 # update bookmarks, dirstate and mergestate
1288 1290 bookmarks.update(self, [p1, p2], ret)
1289 1291 cctx.markcommitted(ret)
1290 1292 ms.reset()
1291 1293 finally:
1292 1294 wlock.release()
1293 1295
1294 1296 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1295 1297 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1296 1298 self._afterlock(commithook)
1297 1299 return ret
1298 1300
1299 1301 @unfilteredmethod
1300 1302 def commitctx(self, ctx, error=False):
1301 1303 """Add a new revision to current repository.
1302 1304 Revision information is passed via the context argument.
1303 1305 """
1304 1306
1305 1307 tr = lock = None
1306 1308 removed = list(ctx.removed())
1307 1309 p1, p2 = ctx.p1(), ctx.p2()
1308 1310 user = ctx.user()
1309 1311
1310 1312 lock = self.lock()
1311 1313 try:
1312 1314 tr = self.transaction("commit")
1313 1315 trp = weakref.proxy(tr)
1314 1316
1315 1317 if ctx.files():
1316 1318 m1 = p1.manifest().copy()
1317 1319 m2 = p2.manifest()
1318 1320
1319 1321 # check in files
1320 1322 new = {}
1321 1323 changed = []
1322 1324 linkrev = len(self)
1323 1325 for f in sorted(ctx.modified() + ctx.added()):
1324 1326 self.ui.note(f + "\n")
1325 1327 try:
1326 1328 fctx = ctx[f]
1327 1329 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1328 1330 changed)
1329 1331 m1.set(f, fctx.flags())
1330 1332 except OSError, inst:
1331 1333 self.ui.warn(_("trouble committing %s!\n") % f)
1332 1334 raise
1333 1335 except IOError, inst:
1334 1336 errcode = getattr(inst, 'errno', errno.ENOENT)
1335 1337 if error or errcode and errcode != errno.ENOENT:
1336 1338 self.ui.warn(_("trouble committing %s!\n") % f)
1337 1339 raise
1338 1340 else:
1339 1341 removed.append(f)
1340 1342
1341 1343 # update manifest
1342 1344 m1.update(new)
1343 1345 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1344 1346 drop = [f for f in removed if f in m1]
1345 1347 for f in drop:
1346 1348 del m1[f]
1347 1349 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1348 1350 p2.manifestnode(), (new, drop))
1349 1351 files = changed + removed
1350 1352 else:
1351 1353 mn = p1.manifestnode()
1352 1354 files = []
1353 1355
1354 1356 # update changelog
1355 1357 self.changelog.delayupdate()
1356 1358 n = self.changelog.add(mn, files, ctx.description(),
1357 1359 trp, p1.node(), p2.node(),
1358 1360 user, ctx.date(), ctx.extra().copy())
1359 1361 p = lambda: self.changelog.writepending() and self.root or ""
1360 1362 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1361 1363 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1362 1364 parent2=xp2, pending=p)
1363 1365 self.changelog.finalize(trp)
1364 1366 # set the new commit in its proper phase
1365 1367 targetphase = phases.newcommitphase(self.ui)
1366 1368 if targetphase:
1367 1369 # retracting the boundary does not alter the parent changeset.
1368 1370 # if a parent has a higher phase, the resulting phase will
1369 1371 # be compliant anyway
1370 1372 #
1371 1373 # if minimal phase was 0 we don't need to retract anything
1372 1374 phases.retractboundary(self, targetphase, [n])
1373 1375 tr.close()
1374 1376 branchmap.updatecache(self.filtered('served'))
1375 1377 return n
1376 1378 finally:
1377 1379 if tr:
1378 1380 tr.release()
1379 1381 lock.release()
1380 1382
1381 1383 @unfilteredmethod
1382 1384 def destroying(self):
1383 1385 '''Inform the repository that nodes are about to be destroyed.
1384 1386 Intended for use by strip and rollback, so there's a common
1385 1387 place for anything that has to be done before destroying history.
1386 1388
1387 1389 This is mostly useful for saving state that is in memory and waiting
1388 1390 to be flushed when the current lock is released. Because a call to
1389 1391 destroyed is imminent, the repo will be invalidated causing those
1390 1392 changes to stay in memory (waiting for the next unlock), or vanish
1391 1393 completely.
1392 1394 '''
1393 1395 # When using the same lock to commit and strip, the phasecache is left
1394 1396 # dirty after committing. Then when we strip, the repo is invalidated,
1395 1397 # causing those changes to disappear.
1396 1398 if '_phasecache' in vars(self):
1397 1399 self._phasecache.write()
1398 1400
1399 1401 @unfilteredmethod
1400 1402 def destroyed(self):
1401 1403 '''Inform the repository that nodes have been destroyed.
1402 1404 Intended for use by strip and rollback, so there's a common
1403 1405 place for anything that has to be done after destroying history.
1404 1406 '''
1405 1407 # When one tries to:
1406 1408 # 1) destroy nodes thus calling this method (e.g. strip)
1407 1409 # 2) use phasecache somewhere (e.g. commit)
1408 1410 #
1409 1411 # then 2) will fail because the phasecache contains nodes that were
1410 1412 # removed. We can either remove phasecache from the filecache,
1411 1413 # causing it to reload next time it is accessed, or simply filter
1412 1414 # the removed nodes now and write the updated cache.
1413 1415 self._phasecache.filterunknown(self)
1414 1416 self._phasecache.write()
1415 1417
1416 1418 # update the 'served' branch cache to help read only server process
1417 1419 # Thanks to branchcache collaboration this is done from the nearest
1418 1420 # filtered subset and it is expected to be fast.
1419 1421 branchmap.updatecache(self.filtered('served'))
1420 1422
1421 1423 # Ensure the persistent tag cache is updated. Doing it now
1422 1424 # means that the tag cache only has to worry about destroyed
1423 1425 # heads immediately after a strip/rollback. That in turn
1424 1426 # guarantees that "cachetip == currenttip" (comparing both rev
1425 1427 # and node) always means no nodes have been added or destroyed.
1426 1428
1427 1429 # XXX this is suboptimal when qrefresh'ing: we strip the current
1428 1430 # head, refresh the tag cache, then immediately add a new head.
1429 1431 # But I think doing it this way is necessary for the "instant
1430 1432 # tag cache retrieval" case to work.
1431 1433 self.invalidate()
1432 1434
1433 1435 def walk(self, match, node=None):
1434 1436 '''
1435 1437 walk recursively through the directory tree or a given
1436 1438 changeset, finding all files matched by the match
1437 1439 function
1438 1440 '''
1439 1441 return self[node].walk(match)
1440 1442
1441 1443 def status(self, node1='.', node2=None, match=None,
1442 1444 ignored=False, clean=False, unknown=False,
1443 1445 listsubrepos=False):
1444 1446 """return status of files between two nodes or node and working
1445 1447 directory.
1446 1448
1447 1449 If node1 is None, use the first dirstate parent instead.
1448 1450 If node2 is None, compare node1 with working directory.
1449 1451 """
1450 1452
1451 1453 def mfmatches(ctx):
1452 1454 mf = ctx.manifest().copy()
1453 1455 if match.always():
1454 1456 return mf
1455 1457 for fn in mf.keys():
1456 1458 if not match(fn):
1457 1459 del mf[fn]
1458 1460 return mf
1459 1461
1460 1462 ctx1 = self[node1]
1461 1463 ctx2 = self[node2]
1462 1464
1463 1465 working = ctx2.rev() is None
1464 1466 parentworking = working and ctx1 == self['.']
1465 1467 match = match or matchmod.always(self.root, self.getcwd())
1466 1468 listignored, listclean, listunknown = ignored, clean, unknown
1467 1469
1468 1470 # load earliest manifest first for caching reasons
1469 1471 if not working and ctx2.rev() < ctx1.rev():
1470 1472 ctx2.manifest()
1471 1473
1472 1474 if not parentworking:
1473 1475 def bad(f, msg):
1474 1476 # 'f' may be a directory pattern from 'match.files()',
1475 1477 # so 'f not in ctx1' is not enough
1476 1478 if f not in ctx1 and f not in ctx1.dirs():
1477 1479 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1478 1480 match.bad = bad
1479 1481
1480 1482 if working: # we need to scan the working dir
1481 1483 subrepos = []
1482 1484 if '.hgsub' in self.dirstate:
1483 1485 subrepos = sorted(ctx2.substate)
1484 1486 s = self.dirstate.status(match, subrepos, listignored,
1485 1487 listclean, listunknown)
1486 1488 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1487 1489
1488 1490 # check for any possibly clean files
1489 1491 if parentworking and cmp:
1490 1492 fixup = []
1491 1493 # do a full compare of any files that might have changed
1492 1494 for f in sorted(cmp):
1493 1495 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1494 1496 or ctx1[f].cmp(ctx2[f])):
1495 1497 modified.append(f)
1496 1498 else:
1497 1499 fixup.append(f)
1498 1500
1499 1501 # update dirstate for files that are actually clean
1500 1502 if fixup:
1501 1503 if listclean:
1502 1504 clean += fixup
1503 1505
1504 1506 try:
1505 1507 # updating the dirstate is optional
1506 1508 # so we don't wait on the lock
1507 1509 wlock = self.wlock(False)
1508 1510 try:
1509 1511 for f in fixup:
1510 1512 self.dirstate.normal(f)
1511 1513 finally:
1512 1514 wlock.release()
1513 1515 except error.LockError:
1514 1516 pass
1515 1517
1516 1518 if not parentworking:
1517 1519 mf1 = mfmatches(ctx1)
1518 1520 if working:
1519 1521 # we are comparing working dir against non-parent
1520 1522 # generate a pseudo-manifest for the working dir
1521 1523 mf2 = mfmatches(self['.'])
1522 1524 for f in cmp + modified + added:
1523 1525 mf2[f] = None
1524 1526 mf2.set(f, ctx2.flags(f))
1525 1527 for f in removed:
1526 1528 if f in mf2:
1527 1529 del mf2[f]
1528 1530 else:
1529 1531 # we are comparing two revisions
1530 1532 deleted, unknown, ignored = [], [], []
1531 1533 mf2 = mfmatches(ctx2)
1532 1534
1533 1535 modified, added, clean = [], [], []
1534 1536 withflags = mf1.withflags() | mf2.withflags()
1535 1537 for fn, mf2node in mf2.iteritems():
1536 1538 if fn in mf1:
1537 1539 if (fn not in deleted and
1538 1540 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1539 1541 (mf1[fn] != mf2node and
1540 1542 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1541 1543 modified.append(fn)
1542 1544 elif listclean:
1543 1545 clean.append(fn)
1544 1546 del mf1[fn]
1545 1547 elif fn not in deleted:
1546 1548 added.append(fn)
1547 1549 removed = mf1.keys()
1548 1550
1549 1551 if working and modified and not self.dirstate._checklink:
1550 1552 # Symlink placeholders may get non-symlink-like contents
1551 1553 # via user error or dereferencing by NFS or Samba servers,
1552 1554 # so we filter out any placeholders that don't look like a
1553 1555 # symlink
1554 1556 sane = []
1555 1557 for f in modified:
1556 1558 if ctx2.flags(f) == 'l':
1557 1559 d = ctx2[f].data()
1558 1560 if len(d) >= 1024 or '\n' in d or util.binary(d):
1559 1561 self.ui.debug('ignoring suspect symlink placeholder'
1560 1562 ' "%s"\n' % f)
1561 1563 continue
1562 1564 sane.append(f)
1563 1565 modified = sane
1564 1566
1565 1567 r = modified, added, removed, deleted, unknown, ignored, clean
1566 1568
1567 1569 if listsubrepos:
1568 1570 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1569 1571 if working:
1570 1572 rev2 = None
1571 1573 else:
1572 1574 rev2 = ctx2.substate[subpath][1]
1573 1575 try:
1574 1576 submatch = matchmod.narrowmatcher(subpath, match)
1575 1577 s = sub.status(rev2, match=submatch, ignored=listignored,
1576 1578 clean=listclean, unknown=listunknown,
1577 1579 listsubrepos=True)
1578 1580 for rfiles, sfiles in zip(r, s):
1579 1581 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1580 1582 except error.LookupError:
1581 1583 self.ui.status(_("skipping missing subrepository: %s\n")
1582 1584 % subpath)
1583 1585
1584 1586 for l in r:
1585 1587 l.sort()
1586 1588 return r
1587 1589
1588 1590 def heads(self, start=None):
1589 1591 heads = self.changelog.heads(start)
1590 1592 # sort the output in rev descending order
1591 1593 return sorted(heads, key=self.changelog.rev, reverse=True)
1592 1594
1593 1595 def branchheads(self, branch=None, start=None, closed=False):
1594 1596 '''return a (possibly filtered) list of heads for the given branch
1595 1597
1596 1598 Heads are returned in topological order, from newest to oldest.
1597 1599 If branch is None, use the dirstate branch.
1598 1600 If start is not None, return only heads reachable from start.
1599 1601 If closed is True, return heads that are marked as closed as well.
1600 1602 '''
1601 1603 if branch is None:
1602 1604 branch = self[None].branch()
1603 1605 branches = self.branchmap()
1604 1606 if branch not in branches:
1605 1607 return []
1606 1608 # the cache returns heads ordered lowest to highest
1607 1609 bheads = list(reversed(branches[branch]))
1608 1610 if start is not None:
1609 1611 # filter out the heads that cannot be reached from startrev
1610 1612 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1611 1613 bheads = [h for h in bheads if h in fbheads]
1612 1614 if not closed:
1613 1615 bheads = [h for h in bheads if not self[h].closesbranch()]
1614 1616 return bheads
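# A minimal usage sketch (illustrative only; 'repo' stands for a
# localrepository instance and is not defined in this module):
#     heads = repo.branchheads('default', closed=False)
#     newest = heads[0] if heads else None   # heads are returned newest-first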
1615 1617
1616 1618 def branches(self, nodes):
1617 1619 if not nodes:
1618 1620 nodes = [self.changelog.tip()]
1619 1621 b = []
1620 1622 for n in nodes:
1621 1623 t = n
1622 1624 while True:
1623 1625 p = self.changelog.parents(n)
1624 1626 if p[1] != nullid or p[0] == nullid:
1625 1627 b.append((t, n, p[0], p[1]))
1626 1628 break
1627 1629 n = p[0]
1628 1630 return b
1629 1631
1630 1632 def between(self, pairs):
1631 1633 r = []
1632 1634
1633 1635 for top, bottom in pairs:
1634 1636 n, l, i = top, [], 0
1635 1637 f = 1
1636 1638
1637 1639 while n != bottom and n != nullid:
1638 1640 p = self.changelog.parents(n)[0]
1639 1641 if i == f:
1640 1642 l.append(n)
1641 1643 f = f * 2
1642 1644 n = p
1643 1645 i += 1
1644 1646
1645 1647 r.append(l)
1646 1648
1647 1649 return r
1648 1650
1649 1651 def pull(self, remote, heads=None, force=False):
1650 1652 # don't open a transaction for nothing, or you break future useful
1651 1653 # rollback calls
1652 1654 tr = None
1653 1655 trname = 'pull\n' + util.hidepassword(remote.url())
1654 1656 lock = self.lock()
1655 1657 try:
1656 1658 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1657 1659 force=force)
1658 1660 common, fetch, rheads = tmp
1659 1661 if not fetch:
1660 1662 self.ui.status(_("no changes found\n"))
1661 1663 added = []
1662 1664 result = 0
1663 1665 else:
1664 1666 tr = self.transaction(trname)
1665 1667 if heads is None and list(common) == [nullid]:
1666 1668 self.ui.status(_("requesting all changes\n"))
1667 1669 elif heads is None and remote.capable('changegroupsubset'):
1668 1670 # issue1320, avoid a race if remote changed after discovery
1669 1671 heads = rheads
1670 1672
1671 1673 if remote.capable('getbundle'):
1672 1674 # TODO: get bundlecaps from remote
1673 1675 cg = remote.getbundle('pull', common=common,
1674 1676 heads=heads or rheads)
1675 1677 elif heads is None:
1676 1678 cg = remote.changegroup(fetch, 'pull')
1677 1679 elif not remote.capable('changegroupsubset'):
1678 1680 raise util.Abort(_("partial pull cannot be done because "
1679 1681 "other repository doesn't support "
1680 1682 "changegroupsubset."))
1681 1683 else:
1682 1684 cg = remote.changegroupsubset(fetch, heads, 'pull')
1683 1685 # we use the unfiltered changelog here because hidden revisions must
1684 1686 # be taken into account for phase synchronization. They may
1685 1687 # become public and become visible again.
1686 1688 cl = self.unfiltered().changelog
1687 1689 clstart = len(cl)
1688 1690 result = self.addchangegroup(cg, 'pull', remote.url())
1689 1691 clend = len(cl)
1690 1692 added = [cl.node(r) for r in xrange(clstart, clend)]
1691 1693
1692 1694 # compute target subset
1693 1695 if heads is None:
1694 1696 # We pulled everything possible
1695 1697 # sync on everything common
1696 1698 subset = common + added
1697 1699 else:
1698 1700 # We pulled a specific subset
1699 1701 # sync on this subset
1700 1702 subset = heads
1701 1703
1702 1704 # Get phases data from the remote
1703 1705 remotephases = remote.listkeys('phases')
1704 1706 publishing = bool(remotephases.get('publishing', False))
1705 1707 if remotephases and not publishing:
1706 1708 # remote is new and non-publishing
1707 1709 pheads, _dr = phases.analyzeremotephases(self, subset,
1708 1710 remotephases)
1709 1711 phases.advanceboundary(self, phases.public, pheads)
1710 1712 phases.advanceboundary(self, phases.draft, subset)
1711 1713 else:
1712 1714 # Remote is old or publishing; all common changesets
1713 1715 # should be seen as public
1714 1716 phases.advanceboundary(self, phases.public, subset)
1715 1717
1716 1718 def gettransaction():
1717 1719 if tr is None:
1718 1720 return self.transaction(trname)
1719 1721 return tr
1720 1722
1721 1723 obstr = obsolete.syncpull(self, remote, gettransaction)
1722 1724 if obstr is not None:
1723 1725 tr = obstr
1724 1726
1725 1727 if tr is not None:
1726 1728 tr.close()
1727 1729 finally:
1728 1730 if tr is not None:
1729 1731 tr.release()
1730 1732 lock.release()
1731 1733
1732 1734 return result
1733 1735
1734 1736 def checkpush(self, force, revs):
1735 1737 """Extensions can override this function if additional checks have
1736 1738 to be performed before pushing, or call it if they override the push
1737 1739 command.
1738 1740 """
1739 1741 pass
1740 1742
1741 1743 def push(self, remote, force=False, revs=None, newbranch=False):
1742 1744 '''Push outgoing changesets (limited by revs) from the current
1743 1745 repository to remote. Return an integer:
1744 1746 - None means nothing to push
1745 1747 - 0 means HTTP error
1746 1748 - 1 means we pushed and remote head count is unchanged *or*
1747 1749 we have outgoing changesets but refused to push
1748 1750 - other values as described by addchangegroup()
1749 1751 '''
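# A sketch of how a caller might interpret these return values
# (illustrative names only, not taken from this module):
#     ret = repo.push(remote)
#     if ret is None:
#         ui.status('no changes to push\n')
#     elif ret == 0:
#         ui.warn('push failed over HTTP\n')
#     else:
#         ui.status('push completed\n')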
1750 1752 # there are two ways to push to remote repo:
1751 1753 #
1752 1754 # addchangegroup assumes local user can lock remote
1753 1755 # repo (local filesystem, old ssh servers).
1754 1756 #
1755 1757 # unbundle assumes local user cannot lock remote repo (new ssh
1756 1758 # servers, http servers).
1757 1759
1758 1760 if not remote.canpush():
1759 1761 raise util.Abort(_("destination does not support push"))
1760 1762 unfi = self.unfiltered()
1761 1763 def localphasemove(nodes, phase=phases.public):
1762 1764 """move <nodes> to <phase> in the local source repo"""
1763 1765 if locallock is not None:
1764 1766 phases.advanceboundary(self, phase, nodes)
1765 1767 else:
1766 1768 # repo is not locked, do not change any phases!
1767 1769 # Inform the user that phases should have been moved when
1768 1770 # applicable.
1769 1771 actualmoves = [n for n in nodes if phase < self[n].phase()]
1770 1772 phasestr = phases.phasenames[phase]
1771 1773 if actualmoves:
1772 1774 self.ui.status(_('cannot lock source repo, skipping local'
1773 1775 ' %s phase update\n') % phasestr)
1774 1776 # get local lock as we might write phase data
1775 1777 locallock = None
1776 1778 try:
1777 1779 locallock = self.lock()
1778 1780 except IOError, err:
1779 1781 if err.errno != errno.EACCES:
1780 1782 raise
1781 1783 # source repo cannot be locked.
1782 1784 # We do not abort the push, but just disable the local phase
1783 1785 # synchronisation.
1784 1786 msg = 'cannot lock source repository: %s\n' % err
1785 1787 self.ui.debug(msg)
1786 1788 try:
1787 1789 self.checkpush(force, revs)
1788 1790 lock = None
1789 1791 unbundle = remote.capable('unbundle')
1790 1792 if not unbundle:
1791 1793 lock = remote.lock()
1792 1794 try:
1793 1795 # discovery
1794 1796 fci = discovery.findcommonincoming
1795 1797 commoninc = fci(unfi, remote, force=force)
1796 1798 common, inc, remoteheads = commoninc
1797 1799 fco = discovery.findcommonoutgoing
1798 1800 outgoing = fco(unfi, remote, onlyheads=revs,
1799 1801 commoninc=commoninc, force=force)
1800 1802
1801 1803
1802 1804 if not outgoing.missing:
1803 1805 # nothing to push
1804 1806 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1805 1807 ret = None
1806 1808 else:
1807 1809 # something to push
1808 1810 if not force:
1809 1811 # if self.obsstore is empty --> there are no obsolete markers,
1810 1812 # so we can skip the iteration
1811 1813 if unfi.obsstore:
1812 1814 # these messages are defined here because of the 80-char line limit
1813 1815 mso = _("push includes obsolete changeset: %s!")
1814 1816 mst = "push includes %s changeset: %s!"
1815 1817 # plain versions for the i18n tool to detect them
1816 1818 _("push includes unstable changeset: %s!")
1817 1819 _("push includes bumped changeset: %s!")
1818 1820 _("push includes divergent changeset: %s!")
1819 1821 # If we are going to push and there is at least one
1820 1822 # obsolete or unstable changeset in missing, then at
1821 1823 # least one of the missingheads will be obsolete or
1822 1824 # unstable. So checking heads only is ok.
1823 1825 for node in outgoing.missingheads:
1824 1826 ctx = unfi[node]
1825 1827 if ctx.obsolete():
1826 1828 raise util.Abort(mso % ctx)
1827 1829 elif ctx.troubled():
1828 1830 raise util.Abort(_(mst)
1829 1831 % (ctx.troubles()[0],
1830 1832 ctx))
1831 1833 discovery.checkheads(unfi, remote, outgoing,
1832 1834 remoteheads, newbranch,
1833 1835 bool(inc))
1834 1836
1835 1837 # TODO: get bundlecaps from remote
1836 1838 bundlecaps = None
1837 1839 # create a changegroup from local
1838 1840 if revs is None and not outgoing.excluded:
1839 1841 # push everything,
1840 1842 # use the fast path, no race possible on push
1841 1843 bundler = changegroup.bundle10(self, bundlecaps)
1842 1844 cg = self._changegroupsubset(outgoing,
1843 1845 bundler,
1844 1846 'push',
1845 1847 fastpath=True)
1846 1848 else:
1847 1849 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1848 1850
1849 1851 # apply changegroup to remote
1850 1852 if unbundle:
1851 1853 # local repo finds heads on server, finds out what
1852 1854 # revs it must push. once revs transferred, if server
1853 1855 # finds it has different heads (someone else won
1854 1856 # commit/push race), server aborts.
1855 1857 if force:
1856 1858 remoteheads = ['force']
1857 1859 # ssh: return remote's addchangegroup()
1858 1860 # http: return remote's addchangegroup() or 0 for error
1859 1861 ret = remote.unbundle(cg, remoteheads, 'push')
1860 1862 else:
1861 1863 # we return an integer indicating remote head count
1862 1864 # change
1863 1865 ret = remote.addchangegroup(cg, 'push', self.url())
1864 1866
1865 1867 if ret:
1866 1868 # push succeeded, synchronize the target of the push
1867 1869 cheads = outgoing.missingheads
1868 1870 elif revs is None:
1869 1871 # All-out push failed. Synchronize on everything common
1870 1872 cheads = outgoing.commonheads
1871 1873 else:
1872 1874 # I want cheads = heads(::missingheads and ::commonheads)
1873 1875 # (missingheads is revs with secret changesets filtered out)
1874 1876 #
1875 1877 # This can be expressed as:
1876 1878 # cheads = ( (missingheads and ::commonheads)
1877 1879 #           + (commonheads and ::missingheads))
1878 1880 #
1879 1881 #
1880 1882 # while trying to push we already computed the following:
1881 1883 # common = (::commonheads)
1882 1884 # missing = ((commonheads::missingheads) - commonheads)
1883 1885 #
1884 1886 # We can pick:
1885 1887 # * missingheads part of common (::commonheads)
1886 1888 common = set(outgoing.common)
1887 1889 cheads = [node for node in revs if node in common]
1888 1890 # and
1889 1891 # * commonheads that are parents of roots of missing
1890 1892 revset = unfi.set('%ln and parents(roots(%ln))',
1891 1893 outgoing.commonheads,
1892 1894 outgoing.missing)
1893 1895 cheads.extend(c.node() for c in revset)
1894 1896 # even when we don't push, exchanging phase data is useful
1895 1897 remotephases = remote.listkeys('phases')
1896 1898 if (self.ui.configbool('ui', '_usedassubrepo', False)
1897 1899 and remotephases # server supports phases
1898 1900 and ret is None # nothing was pushed
1899 1901 and remotephases.get('publishing', False)):
1900 1902 # When:
1901 1903 # - this is a subrepo push
1902 1904 # - and the remote supports phases
1903 1905 # - and no changeset was pushed
1904 1906 # - and the remote is publishing
1905 1907 # We may be in the issue 3871 case!
1906 1908 # We drop the phase synchronisation that would otherwise be done
1907 1909 # as a courtesy to publish changesets that are possibly still
1908 1910 # draft locally on the remote.
1909 1911 remotephases = {'publishing': 'True'}
1910 1912 if not remotephases: # old server or public only repo
1911 1913 localphasemove(cheads)
1912 1914 # don't push any phase data as there is nothing to push
1913 1915 else:
1914 1916 ana = phases.analyzeremotephases(self, cheads, remotephases)
1915 1917 pheads, droots = ana
1916 1918 ### Apply remote phase on local
1917 1919 if remotephases.get('publishing', False):
1918 1920 localphasemove(cheads)
1919 1921 else: # publish = False
1920 1922 localphasemove(pheads)
1921 1923 localphasemove(cheads, phases.draft)
1922 1924 ### Apply local phase on remote
1923 1925
1924 1926 # Get the list of all revs that are draft on the remote but public here.
1925 1927 # XXX Beware that the revset breaks if droots is not strictly
1926 1928 # XXX roots; we may want to ensure it is, but that is costly
1927 1929 outdated = unfi.set('heads((%ln::%ln) and public())',
1928 1930 droots, cheads)
1929 1931 for newremotehead in outdated:
1930 1932 r = remote.pushkey('phases',
1931 1933 newremotehead.hex(),
1932 1934 str(phases.draft),
1933 1935 str(phases.public))
1934 1936 if not r:
1935 1937 self.ui.warn(_('updating %s to public failed!\n')
1936 1938 % newremotehead)
1937 1939 self.ui.debug('try to push obsolete markers to remote\n')
1938 1940 obsolete.syncpush(self, remote)
1939 1941 finally:
1940 1942 if lock is not None:
1941 1943 lock.release()
1942 1944 finally:
1943 1945 if locallock is not None:
1944 1946 locallock.release()
1945 1947
1946 1948 self.ui.debug("checking for updated bookmarks\n")
1947 1949 rb = remote.listkeys('bookmarks')
1948 1950 revnums = map(unfi.changelog.rev, revs or [])
1949 1951 ancestors = [
1950 1952 a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
1951 1953 for k in rb.keys():
1952 1954 if k in unfi._bookmarks:
1953 1955 nr, nl = rb[k], hex(self._bookmarks[k])
1954 1956 if nr in unfi:
1955 1957 cr = unfi[nr]
1956 1958 cl = unfi[nl]
1957 1959 if bookmarks.validdest(unfi, cr, cl):
1958 1960 if ancestors and cl.rev() not in ancestors:
1959 1961 continue
1960 1962 r = remote.pushkey('bookmarks', k, nr, nl)
1961 1963 if r:
1962 1964 self.ui.status(_("updating bookmark %s\n") % k)
1963 1965 else:
1964 1966 self.ui.warn(_('updating bookmark %s'
1965 1967 ' failed!\n') % k)
1966 1968
1967 1969 return ret
1968 1970
1969 1971 def changegroupinfo(self, nodes, source):
1970 1972 if self.ui.verbose or source == 'bundle':
1971 1973 self.ui.status(_("%d changesets found\n") % len(nodes))
1972 1974 if self.ui.debugflag:
1973 1975 self.ui.debug("list of changesets:\n")
1974 1976 for node in nodes:
1975 1977 self.ui.debug("%s\n" % hex(node))
1976 1978
1977 1979 def changegroupsubset(self, bases, heads, source):
1978 1980 """Compute a changegroup consisting of all the nodes that are
1979 1981 descendants of any of the bases and ancestors of any of the heads.
1980 1982 Return a chunkbuffer object whose read() method will return
1981 1983 successive changegroup chunks.
1982 1984
1983 1985 It is fairly complex as determining which filenodes and which
1984 1986 manifest nodes need to be included for the changeset to be complete
1985 1987 is non-trivial.
1986 1988
1987 1989 Another wrinkle is doing the reverse, figuring out which changeset in
1988 1990 the changegroup a particular filenode or manifestnode belongs to.
1989 1991 """
1990 1992 cl = self.changelog
1991 1993 if not bases:
1992 1994 bases = [nullid]
1993 1995 # TODO: remove call to nodesbetween.
1994 1996 csets, bases, heads = cl.nodesbetween(bases, heads)
1995 1997 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
1996 1998 outgoing = discovery.outgoing(cl, bases, heads)
1997 1999 bundler = changegroup.bundle10(self)
1998 2000 return self._changegroupsubset(outgoing, bundler, source)
1999 2001
2000 2002 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2001 2003 """Like getbundle, but taking a discovery.outgoing as an argument.
2002 2004
2003 2005 This is only implemented for local repos and reuses potentially
2004 2006 precomputed sets in outgoing."""
2005 2007 if not outgoing.missing:
2006 2008 return None
2007 2009 bundler = changegroup.bundle10(self, bundlecaps)
2008 2010 return self._changegroupsubset(outgoing, bundler, source)
2009 2011
2010 2012 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2011 2013 """Like changegroupsubset, but returns the set difference between the
2012 2014 ancestors of heads and the ancestors of common.
2013 2015
2014 2016 If heads is None, use the local heads. If common is None, use [nullid].
2015 2017
2016 2018 The nodes in common might not all be known locally due to the way the
2017 2019 current discovery protocol works.
2018 2020 """
2019 2021 cl = self.changelog
2020 2022 if common:
2021 2023 hasnode = cl.hasnode
2022 2024 common = [n for n in common if hasnode(n)]
2023 2025 else:
2024 2026 common = [nullid]
2025 2027 if not heads:
2026 2028 heads = cl.heads()
2027 2029 return self.getlocalbundle(source,
2028 2030 discovery.outgoing(cl, common, heads),
2029 2031 bundlecaps=bundlecaps)
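# Usage sketch (assuming 'repo' is a localrepository instance): with
# heads=None and common=None this falls back to the local heads and
# [nullid], i.e. a bundle of every changeset the repository has:
#     cg = repo.getbundle('pull', heads=None, common=None)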
2030 2032
2031 2033 @unfilteredmethod
2032 2034 def _changegroupsubset(self, outgoing, bundler, source,
2033 2035 fastpath=False):
2034 2036 commonrevs = outgoing.common
2035 2037 csets = outgoing.missing
2036 2038 heads = outgoing.missingheads
2037 2039 # We go through the fast path if we get told to, or if all (unfiltered)
2038 2040 # heads have been requested (since we then know all the linkrevs will
2039 2041 # be pulled by the client).
2040 2042 heads.sort()
2041 2043 fastpathlinkrev = fastpath or (
2042 2044 self.filtername is None and heads == sorted(self.heads()))
2043 2045
2044 2046 self.hook('preoutgoing', throw=True, source=source)
2045 2047 self.changegroupinfo(csets, source)
2046 2048 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2047 2049 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2048 2050
2049 2051 def changegroup(self, basenodes, source):
2050 2052 # to avoid a race we use changegroupsubset() (issue1320)
2051 2053 return self.changegroupsubset(basenodes, self.heads(), source)
2052 2054
2053 2055 @unfilteredmethod
2054 2056 def addchangegroup(self, source, srctype, url, emptyok=False):
2055 2057 """Add the changegroup returned by source.read() to this repo.
2056 2058 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2057 2059 the URL of the repo where this changegroup is coming from.
2058 2060
2059 2061 Return an integer summarizing the change to this repo:
2060 2062 - nothing changed or no source: 0
2061 2063 - more heads than before: 1+added heads (2..n)
2062 2064 - fewer heads than before: -1-removed heads (-2..-n)
2063 2065 - number of heads stays the same: 1
2064 2066 """
2065 2067 def csmap(x):
2066 2068 self.ui.debug("add changeset %s\n" % short(x))
2067 2069 return len(cl)
2068 2070
2069 2071 def revmap(x):
2070 2072 return cl.rev(x)
2071 2073
2072 2074 if not source:
2073 2075 return 0
2074 2076
2075 2077 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2076 2078
2077 2079 changesets = files = revisions = 0
2078 2080 efiles = set()
2079 2081
2080 2082 # write changelog data to temp files so concurrent readers will not see
2081 2083 # an inconsistent view
2082 2084 cl = self.changelog
2083 2085 cl.delayupdate()
2084 2086 oldheads = cl.heads()
2085 2087
2086 2088 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2087 2089 try:
2088 2090 trp = weakref.proxy(tr)
2089 2091 # pull off the changeset group
2090 2092 self.ui.status(_("adding changesets\n"))
2091 2093 clstart = len(cl)
2092 2094 class prog(object):
2093 2095 step = _('changesets')
2094 2096 count = 1
2095 2097 ui = self.ui
2096 2098 total = None
2097 2099 def __call__(self):
2098 2100 self.ui.progress(self.step, self.count, unit=_('chunks'),
2099 2101 total=self.total)
2100 2102 self.count += 1
2101 2103 pr = prog()
2102 2104 source.callback = pr
2103 2105
2104 2106 source.changelogheader()
2105 2107 srccontent = cl.addgroup(source, csmap, trp)
2106 2108 if not (srccontent or emptyok):
2107 2109 raise util.Abort(_("received changelog group is empty"))
2108 2110 clend = len(cl)
2109 2111 changesets = clend - clstart
2110 2112 for c in xrange(clstart, clend):
2111 2113 efiles.update(self[c].files())
2112 2114 efiles = len(efiles)
2113 2115 self.ui.progress(_('changesets'), None)
2114 2116
2115 2117 # pull off the manifest group
2116 2118 self.ui.status(_("adding manifests\n"))
2117 2119 pr.step = _('manifests')
2118 2120 pr.count = 1
2119 2121 pr.total = changesets # manifests <= changesets
2120 2122 # no need to check for empty manifest group here:
2121 2123 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2122 2124 # no new manifest will be created and the manifest group will
2123 2125 # be empty during the pull
2124 2126 source.manifestheader()
2125 2127 self.manifest.addgroup(source, revmap, trp)
2126 2128 self.ui.progress(_('manifests'), None)
2127 2129
2128 2130 needfiles = {}
2129 2131 if self.ui.configbool('server', 'validate', default=False):
2130 2132 # validate incoming csets have their manifests
2131 2133 for cset in xrange(clstart, clend):
2132 2134 mfest = self.changelog.read(self.changelog.node(cset))[0]
2133 2135 mfest = self.manifest.readdelta(mfest)
2134 2136 # store file nodes we must see
2135 2137 for f, n in mfest.iteritems():
2136 2138 needfiles.setdefault(f, set()).add(n)
2137 2139
2138 2140 # process the files
2139 2141 self.ui.status(_("adding file changes\n"))
2140 2142 pr.step = _('files')
2141 2143 pr.count = 1
2142 2144 pr.total = efiles
2143 2145 source.callback = None
2144 2146
2145 2147 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2146 2148 pr, needfiles)
2147 2149 revisions += newrevs
2148 2150 files += newfiles
2149 2151
2150 2152 dh = 0
2151 2153 if oldheads:
2152 2154 heads = cl.heads()
2153 2155 dh = len(heads) - len(oldheads)
2154 2156 for h in heads:
2155 2157 if h not in oldheads and self[h].closesbranch():
2156 2158 dh -= 1
2157 2159 htext = ""
2158 2160 if dh:
2159 2161 htext = _(" (%+d heads)") % dh
2160 2162
2161 2163 self.ui.status(_("added %d changesets"
2162 2164 " with %d changes to %d files%s\n")
2163 2165 % (changesets, revisions, files, htext))
2164 2166 self.invalidatevolatilesets()
2165 2167
2166 2168 if changesets > 0:
2167 2169 p = lambda: cl.writepending() and self.root or ""
2168 2170 self.hook('pretxnchangegroup', throw=True,
2169 2171 node=hex(cl.node(clstart)), source=srctype,
2170 2172 url=url, pending=p)
2171 2173
2172 2174 added = [cl.node(r) for r in xrange(clstart, clend)]
2173 2175 publishing = self.ui.configbool('phases', 'publish', True)
2174 2176 if srctype == 'push':
2175 2177 # Old servers cannot push the boundary themselves.
2176 2178 # New servers won't push the boundary if the changeset already
2177 2179 # existed locally as secret
2178 2180 #
2179 2181 # We should not use 'added' here but the list of all changesets in
2180 2182 # the bundle
2181 2183 if publishing:
2182 2184 phases.advanceboundary(self, phases.public, srccontent)
2183 2185 else:
2184 2186 phases.advanceboundary(self, phases.draft, srccontent)
2185 2187 phases.retractboundary(self, phases.draft, added)
2186 2188 elif srctype != 'strip':
2187 2189 # publishing only alters behavior during push
2188 2190 #
2189 2191 # strip should not touch boundary at all
2190 2192 phases.retractboundary(self, phases.draft, added)
2191 2193
2192 2194 # make changelog see real files again
2193 2195 cl.finalize(trp)
2194 2196
2195 2197 tr.close()
2196 2198
2197 2199 if changesets > 0:
2198 2200 if srctype != 'strip':
2199 2201 # During strip, the branchcache is invalid but the coming call to
2200 2202 # `destroyed` will repair it.
2201 2203 # In other cases we can safely update the cache on disk.
2202 2204 branchmap.updatecache(self.filtered('served'))
2203 2205 def runhooks():
2204 2206 # forcefully update the on-disk branch cache
2205 2207 self.ui.debug("updating the branch cache\n")
2206 2208 self.hook("changegroup", node=hex(cl.node(clstart)),
2207 2209 source=srctype, url=url)
2208 2210
2209 2211 for n in added:
2210 2212 self.hook("incoming", node=hex(n), source=srctype,
2211 2213 url=url)
2212 2214
2213 2215 newheads = [h for h in self.heads() if h not in oldheads]
2214 2216 self.ui.log("incoming",
2215 2217 "%s incoming changes - new heads: %s\n",
2216 2218 len(added),
2217 2219 ', '.join([hex(c[:6]) for c in newheads]))
2218 2220 self._afterlock(runhooks)
2219 2221
2220 2222 finally:
2221 2223 tr.release()
2222 2224 # never return 0 here:
2223 2225 if dh < 0:
2224 2226 return dh - 1
2225 2227 else:
2226 2228 return dh + 1
2227 2229
2228 2230 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2229 2231 revisions = 0
2230 2232 files = 0
2231 2233 while True:
2232 2234 chunkdata = source.filelogheader()
2233 2235 if not chunkdata:
2234 2236 break
2235 2237 f = chunkdata["filename"]
2236 2238 self.ui.debug("adding %s revisions\n" % f)
2237 2239 pr()
2238 2240 fl = self.file(f)
2239 2241 o = len(fl)
2240 2242 if not fl.addgroup(source, revmap, trp):
2241 2243 raise util.Abort(_("received file revlog group is empty"))
2242 2244 revisions += len(fl) - o
2243 2245 files += 1
2244 2246 if f in needfiles:
2245 2247 needs = needfiles[f]
2246 2248 for new in xrange(o, len(fl)):
2247 2249 n = fl.node(new)
2248 2250 if n in needs:
2249 2251 needs.remove(n)
2250 2252 else:
2251 2253 raise util.Abort(
2252 2254 _("received spurious file revlog entry"))
2253 2255 if not needs:
2254 2256 del needfiles[f]
2255 2257 self.ui.progress(_('files'), None)
2256 2258
2257 2259 for f, needs in needfiles.iteritems():
2258 2260 fl = self.file(f)
2259 2261 for n in needs:
2260 2262 try:
2261 2263 fl.rev(n)
2262 2264 except error.LookupError:
2263 2265 raise util.Abort(
2264 2266 _('missing file data for %s:%s - run hg verify') %
2265 2267 (f, hex(n)))
2266 2268
2267 2269 return revisions, files
2268 2270
2269 2271 def stream_in(self, remote, requirements):
2270 2272 lock = self.lock()
2271 2273 try:
2272 2274 # Save remote branchmap. We will use it later
2273 2275 # to speed up branchcache creation
2274 2276 rbranchmap = None
2275 2277 if remote.capable("branchmap"):
2276 2278 rbranchmap = remote.branchmap()
2277 2279
2278 2280 fp = remote.stream_out()
2279 2281 l = fp.readline()
2280 2282 try:
2281 2283 resp = int(l)
2282 2284 except ValueError:
2283 2285 raise error.ResponseError(
2284 2286 _('unexpected response from remote server:'), l)
2285 2287 if resp == 1:
2286 2288 raise util.Abort(_('operation forbidden by server'))
2287 2289 elif resp == 2:
2288 2290 raise util.Abort(_('locking the remote repository failed'))
2289 2291 elif resp != 0:
2290 2292 raise util.Abort(_('the server sent an unknown error code'))
2291 2293 self.ui.status(_('streaming all changes\n'))
2292 2294 l = fp.readline()
2293 2295 try:
2294 2296 total_files, total_bytes = map(int, l.split(' ', 1))
2295 2297 except (ValueError, TypeError):
2296 2298 raise error.ResponseError(
2297 2299 _('unexpected response from remote server:'), l)
2298 2300 self.ui.status(_('%d files to transfer, %s of data\n') %
2299 2301 (total_files, util.bytecount(total_bytes)))
2300 2302 handled_bytes = 0
2301 2303 self.ui.progress(_('clone'), 0, total=total_bytes)
2302 2304 start = time.time()
2303 2305 for i in xrange(total_files):
2304 2306 # XXX doesn't support '\n' or '\r' in filenames
2305 2307 l = fp.readline()
2306 2308 try:
2307 2309 name, size = l.split('\0', 1)
2308 2310 size = int(size)
2309 2311 except (ValueError, TypeError):
2310 2312 raise error.ResponseError(
2311 2313 _('unexpected response from remote server:'), l)
2312 2314 if self.ui.debugflag:
2313 2315 self.ui.debug('adding %s (%s)\n' %
2314 2316 (name, util.bytecount(size)))
2315 2317 # for backwards compat, name was partially encoded
2316 2318 ofp = self.sopener(store.decodedir(name), 'w')
2317 2319 for chunk in util.filechunkiter(fp, limit=size):
2318 2320 handled_bytes += len(chunk)
2319 2321 self.ui.progress(_('clone'), handled_bytes,
2320 2322 total=total_bytes)
2321 2323 ofp.write(chunk)
2322 2324 ofp.close()
2323 2325 elapsed = time.time() - start
2324 2326 if elapsed <= 0:
2325 2327 elapsed = 0.001
2326 2328 self.ui.progress(_('clone'), None)
2327 2329 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2328 2330 (util.bytecount(total_bytes), elapsed,
2329 2331 util.bytecount(total_bytes / elapsed)))
2330 2332
2331 2333 # new requirements = old non-format requirements +
2332 2334 #                    new format-related requirements from the
2333 2335 #                    streamed-in repository
2334 2336 requirements.update(set(self.requirements) - self.supportedformats)
2335 2337 self._applyrequirements(requirements)
2336 2338 self._writerequirements()
2337 2339
2338 2340 if rbranchmap:
2339 2341 rbheads = []
2340 2342 for bheads in rbranchmap.itervalues():
2341 2343 rbheads.extend(bheads)
2342 2344
2343 2345 if rbheads:
2344 2346 rtiprev = max((int(self.changelog.rev(node))
2345 2347 for node in rbheads))
2346 2348 cache = branchmap.branchcache(rbranchmap,
2347 2349 self[rtiprev].node(),
2348 2350 rtiprev)
2349 2351 # Try to stick it as low as possible
2350 2352 # filters above 'served' are unlikely to be fetched from by a clone
2351 2353 for candidate in ('base', 'immutable', 'served'):
2352 2354 rview = self.filtered(candidate)
2353 2355 if cache.validfor(rview):
2354 2356 self._branchcaches[candidate] = cache
2355 2357 cache.write(rview)
2356 2358 break
2357 2359 self.invalidate()
2358 2360 return len(self.heads()) + 1
2359 2361 finally:
2360 2362 lock.release()
2361 2363
2362 2364 def clone(self, remote, heads=[], stream=False):
2363 2365 '''clone remote repository.
2364 2366
2365 2367 keyword arguments:
2366 2368 heads: list of revs to clone (forces use of pull)
2367 2369 stream: use streaming clone if possible'''
2368 2370
2369 2371 # now, all clients that can request uncompressed clones can
2370 2372 # read repo formats supported by all servers that can serve
2371 2373 # them.
2372 2374
2373 2375 # if revlog format changes, client will have to check version
2374 2376 # and format flags on "stream" capability, and use
2375 2377 # uncompressed only if compatible.
2376 2378
2377 2379 if not stream:
2378 2380 # if the server explicitly prefers to stream (for fast LANs)
2379 2381 stream = remote.capable('stream-preferred')
2380 2382
2381 2383 if stream and not heads:
2382 2384 # 'stream' means remote revlog format is revlogv1 only
2383 2385 if remote.capable('stream'):
2384 2386 return self.stream_in(remote, set(('revlogv1',)))
2385 2387 # otherwise, 'streamreqs' contains the remote revlog format
2386 2388 streamreqs = remote.capable('streamreqs')
2387 2389 if streamreqs:
2388 2390 streamreqs = set(streamreqs.split(','))
2389 2391 # if we support it, stream in and adjust our requirements
2390 2392 if not streamreqs - self.supportedformats:
2391 2393 return self.stream_in(remote, streamreqs)
2392 2394 return self.pull(remote, heads)
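# Usage sketch (illustrative; 'remote' stands for a peer object): a full
# clone can request streaming explicitly and will otherwise fall back to
# a regular pull:
#     repo.clone(remote, stream=True)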
2393 2395
2394 2396 def pushkey(self, namespace, key, old, new):
2395 2397 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2396 2398 old=old, new=new)
2397 2399 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2398 2400 ret = pushkey.push(self, namespace, key, old, new)
2399 2401 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2400 2402 ret=ret)
2401 2403 return ret
2402 2404
2403 2405 def listkeys(self, namespace):
2404 2406 self.hook('prelistkeys', throw=True, namespace=namespace)
2405 2407 self.ui.debug('listing keys for "%s"\n' % namespace)
2406 2408 values = pushkey.list(self, namespace)
2407 2409 self.hook('listkeys', namespace=namespace, values=values)
2408 2410 return values
2409 2411
2410 2412 def debugwireargs(self, one, two, three=None, four=None, five=None):
2411 2413 '''used to test argument passing over the wire'''
2412 2414 return "%s %s %s %s %s" % (one, two, three, four, five)
2413 2415
2414 2416 def savecommitmessage(self, text):
2415 2417 fp = self.opener('last-message.txt', 'wb')
2416 2418 try:
2417 2419 fp.write(text)
2418 2420 finally:
2419 2421 fp.close()
2420 2422 return self.pathto(fp.name[len(self.root) + 1:])
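# Usage sketch: the saved message lands in .hg/last-message.txt and the
# returned value is that path made relative (e.g. '.hg/last-message.txt'
# when invoked from the repository root):
#     msgfile = repo.savecommitmessage('draft commit message')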
2421 2423
2422 2424 # used to avoid circular references so destructors work
2423 2425 def aftertrans(files):
2424 2426 renamefiles = [tuple(t) for t in files]
2425 2427 def a():
2426 2428 for vfs, src, dest in renamefiles:
2427 2429 try:
2428 2430 vfs.rename(src, dest)
2429 2431 except OSError: # journal file does not yet exist
2430 2432 pass
2431 2433 return a
2432 2434
2433 2435 def undoname(fn):
2434 2436 base, name = os.path.split(fn)
2435 2437 assert name.startswith('journal')
2436 2438 return os.path.join(base, name.replace('journal', 'undo', 1))
2437 2439
2438 2440 def instance(ui, path, create):
2439 2441 return localrepository(ui, util.urllocalpath(path), create)
2440 2442
2441 2443 def islocal(path):
2442 2444 return True