localrepo: remove unnecessary check of instance...
Sean Farley
r19569:00140039 default
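This changeset simplifies localrepository.status(): in the hunk below, the old lines 1460-1467 guarded node1 and node2 with an isinstance(..., context.changectx) check before falling back to self[node], while the new lines 1460-1461 simply call self[node1] and self[node2]. The sketch that follows is an illustration only, using hypothetical names rather than Mercurial's real classes; it assumes, as the commit title suggests, that the repository lookup path already tolerates being handed an existing context object, which is what makes the caller-side instance check unnecessary.

# Illustrative sketch, not Mercurial code. Assumption: repo[changeid] copes
# with receiving a context object, so callers need no isinstance() branch.

class FakeContext(object):
    def __init__(self, repo, changeid):
        self.repo = repo
        self.changeid = changeid

class FakeRepo(object):
    def __getitem__(self, changeid):
        if isinstance(changeid, FakeContext):
            # hand the context back unchanged instead of re-resolving it
            return changeid
        return FakeContext(self, changeid)

repo = FakeRepo()
ctx = repo['tip']
assert repo[ctx] is ctx                 # a context passes straight through
assert repo['tip'].changeid == 'tip'    # ordinary lookups still resolve

# With that guarantee, the pattern removed by this revision:
#     if isinstance(node1, context.changectx):
#         ctx1 = node1
#     else:
#         ctx1 = self[node1]
# collapses to the single line kept in the new code:
#     ctx1 = self[node1]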
@@ -1,2448 +1,2442 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if a repo has an unfilteredpropertycache value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo.filtered('served')
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return self._repo.branchmap()
95 95
96 96 def heads(self):
97 97 return self._repo.heads()
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common,
104 104 bundlecaps=bundlecaps)
105 105
106 106 # TODO We might want to move the next two calls into legacypeer and add
107 107 # unbundle instead.
108 108
109 109 def lock(self):
110 110 return self._repo.lock()
111 111
112 112 def addchangegroup(self, cg, source, url):
113 113 return self._repo.addchangegroup(cg, source, url)
114 114
115 115 def pushkey(self, namespace, key, old, new):
116 116 return self._repo.pushkey(namespace, key, old, new)
117 117
118 118 def listkeys(self, namespace):
119 119 return self._repo.listkeys(namespace)
120 120
121 121 def debugwireargs(self, one, two, three=None, four=None, five=None):
122 122 '''used to test argument passing over the wire'''
123 123 return "%s %s %s %s %s" % (one, two, three, four, five)
124 124
125 125 class locallegacypeer(localpeer):
126 126 '''peer extension which implements legacy methods too; used for tests with
127 127 restricted capabilities'''
128 128
129 129 def __init__(self, repo):
130 130 localpeer.__init__(self, repo, caps=LEGACYCAPS)
131 131
132 132 def branches(self, nodes):
133 133 return self._repo.branches(nodes)
134 134
135 135 def between(self, pairs):
136 136 return self._repo.between(pairs)
137 137
138 138 def changegroup(self, basenodes, source):
139 139 return self._repo.changegroup(basenodes, source)
140 140
141 141 def changegroupsubset(self, bases, heads, source):
142 142 return self._repo.changegroupsubset(bases, heads, source)
143 143
144 144 class localrepository(object):
145 145
146 146 supportedformats = set(('revlogv1', 'generaldelta'))
147 147 supported = supportedformats | set(('store', 'fncache', 'shared',
148 148 'dotencode'))
149 149 openerreqs = set(('revlogv1', 'generaldelta'))
150 150 requirements = ['revlogv1']
151 151 filtername = None
152 152
153 153 def _baserequirements(self, create):
154 154 return self.requirements[:]
155 155
156 156 def __init__(self, baseui, path=None, create=False):
157 157 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
158 158 self.wopener = self.wvfs
159 159 self.root = self.wvfs.base
160 160 self.path = self.wvfs.join(".hg")
161 161 self.origroot = path
162 162 self.auditor = scmutil.pathauditor(self.root, self._checknested)
163 163 self.vfs = scmutil.vfs(self.path)
164 164 self.opener = self.vfs
165 165 self.baseui = baseui
166 166 self.ui = baseui.copy()
167 167 # A list of callbacks to shape the phase if no data were found.
168 168 # Callbacks are in the form: func(repo, roots) --> processed roots.
169 169 # This list is to be filled by extensions during repo setup.
170 170 self._phasedefaults = []
171 171 try:
172 172 self.ui.readconfig(self.join("hgrc"), self.root)
173 173 extensions.loadall(self.ui)
174 174 except IOError:
175 175 pass
176 176
177 177 if not self.vfs.isdir():
178 178 if create:
179 179 if not self.wvfs.exists():
180 180 self.wvfs.makedirs()
181 181 self.vfs.makedir(notindexed=True)
182 182 requirements = self._baserequirements(create)
183 183 if self.ui.configbool('format', 'usestore', True):
184 184 self.vfs.mkdir("store")
185 185 requirements.append("store")
186 186 if self.ui.configbool('format', 'usefncache', True):
187 187 requirements.append("fncache")
188 188 if self.ui.configbool('format', 'dotencode', True):
189 189 requirements.append('dotencode')
190 190 # create an invalid changelog
191 191 self.vfs.append(
192 192 "00changelog.i",
193 193 '\0\0\0\2' # represents revlogv2
194 194 ' dummy changelog to prevent using the old repo layout'
195 195 )
196 196 if self.ui.configbool('format', 'generaldelta', False):
197 197 requirements.append("generaldelta")
198 198 requirements = set(requirements)
199 199 else:
200 200 raise error.RepoError(_("repository %s not found") % path)
201 201 elif create:
202 202 raise error.RepoError(_("repository %s already exists") % path)
203 203 else:
204 204 try:
205 205 requirements = scmutil.readrequires(self.vfs, self.supported)
206 206 except IOError, inst:
207 207 if inst.errno != errno.ENOENT:
208 208 raise
209 209 requirements = set()
210 210
211 211 self.sharedpath = self.path
212 212 try:
213 213 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
214 214 realpath=True)
215 215 s = vfs.base
216 216 if not vfs.exists():
217 217 raise error.RepoError(
218 218 _('.hg/sharedpath points to nonexistent directory %s') % s)
219 219 self.sharedpath = s
220 220 except IOError, inst:
221 221 if inst.errno != errno.ENOENT:
222 222 raise
223 223
224 224 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
225 225 self.spath = self.store.path
226 226 self.svfs = self.store.vfs
227 227 self.sopener = self.svfs
228 228 self.sjoin = self.store.join
229 229 self.vfs.createmode = self.store.createmode
230 230 self._applyrequirements(requirements)
231 231 if create:
232 232 self._writerequirements()
233 233
234 234
235 235 self._branchcaches = {}
236 236 self.filterpats = {}
237 237 self._datafilters = {}
238 238 self._transref = self._lockref = self._wlockref = None
239 239
240 240 # A cache for various files under .hg/ that tracks file changes,
241 241 # (used by the filecache decorator)
242 242 #
243 243 # Maps a property name to its util.filecacheentry
244 244 self._filecache = {}
245 245
246 246 # hold sets of revision to be filtered
247 247 # should be cleared when something might have changed the filter value:
248 248 # - new changesets,
249 249 # - phase change,
250 250 # - new obsolescence marker,
251 251 # - working directory parent change,
252 252 # - bookmark changes
253 253 self.filteredrevcache = {}
254 254
255 255 def close(self):
256 256 pass
257 257
258 258 def _restrictcapabilities(self, caps):
259 259 return caps
260 260
261 261 def _applyrequirements(self, requirements):
262 262 self.requirements = requirements
263 263 self.sopener.options = dict((r, 1) for r in requirements
264 264 if r in self.openerreqs)
265 265
266 266 def _writerequirements(self):
267 267 reqfile = self.opener("requires", "w")
268 268 for r in sorted(self.requirements):
269 269 reqfile.write("%s\n" % r)
270 270 reqfile.close()
271 271
272 272 def _checknested(self, path):
273 273 """Determine if path is a legal nested repository."""
274 274 if not path.startswith(self.root):
275 275 return False
276 276 subpath = path[len(self.root) + 1:]
277 277 normsubpath = util.pconvert(subpath)
278 278
279 279 # XXX: Checking against the current working copy is wrong in
280 280 # the sense that it can reject things like
281 281 #
282 282 # $ hg cat -r 10 sub/x.txt
283 283 #
284 284 # if sub/ is no longer a subrepository in the working copy
285 285 # parent revision.
286 286 #
287 287 # However, it can of course also allow things that would have
288 288 # been rejected before, such as the above cat command if sub/
289 289 # is a subrepository now, but was a normal directory before.
290 290 # The old path auditor would have rejected by mistake since it
291 291 # panics when it sees sub/.hg/.
292 292 #
293 293 # All in all, checking against the working copy seems sensible
294 294 # since we want to prevent access to nested repositories on
295 295 # the filesystem *now*.
296 296 ctx = self[None]
297 297 parts = util.splitpath(subpath)
298 298 while parts:
299 299 prefix = '/'.join(parts)
300 300 if prefix in ctx.substate:
301 301 if prefix == normsubpath:
302 302 return True
303 303 else:
304 304 sub = ctx.sub(prefix)
305 305 return sub.checknested(subpath[len(prefix) + 1:])
306 306 else:
307 307 parts.pop()
308 308 return False
309 309
310 310 def peer(self):
311 311 return localpeer(self) # not cached to avoid reference cycle
312 312
313 313 def unfiltered(self):
314 314 """Return unfiltered version of the repository
315 315
316 316 Intended to be overwritten by filtered repo."""
317 317 return self
318 318
319 319 def filtered(self, name):
320 320 """Return a filtered version of a repository"""
321 321 # build a new class with the mixin and the current class
322 322 # (possibly subclass of the repo)
323 323 class proxycls(repoview.repoview, self.unfiltered().__class__):
324 324 pass
325 325 return proxycls(self, name)
326 326
327 327 @repofilecache('bookmarks')
328 328 def _bookmarks(self):
329 329 return bookmarks.bmstore(self)
330 330
331 331 @repofilecache('bookmarks.current')
332 332 def _bookmarkcurrent(self):
333 333 return bookmarks.readcurrent(self)
334 334
335 335 def bookmarkheads(self, bookmark):
336 336 name = bookmark.split('@', 1)[0]
337 337 heads = []
338 338 for mark, n in self._bookmarks.iteritems():
339 339 if mark.split('@', 1)[0] == name:
340 340 heads.append(n)
341 341 return heads
342 342
343 343 @storecache('phaseroots')
344 344 def _phasecache(self):
345 345 return phases.phasecache(self, self._phasedefaults)
346 346
347 347 @storecache('obsstore')
348 348 def obsstore(self):
349 349 store = obsolete.obsstore(self.sopener)
350 350 if store and not obsolete._enabled:
351 351 # message is rare enough to not be translated
352 352 msg = 'obsolete feature not enabled but %i markers found!\n'
353 353 self.ui.warn(msg % len(list(store)))
354 354 return store
355 355
356 356 @storecache('00changelog.i')
357 357 def changelog(self):
358 358 c = changelog.changelog(self.sopener)
359 359 if 'HG_PENDING' in os.environ:
360 360 p = os.environ['HG_PENDING']
361 361 if p.startswith(self.root):
362 362 c.readpending('00changelog.i.a')
363 363 return c
364 364
365 365 @storecache('00manifest.i')
366 366 def manifest(self):
367 367 return manifest.manifest(self.sopener)
368 368
369 369 @repofilecache('dirstate')
370 370 def dirstate(self):
371 371 warned = [0]
372 372 def validate(node):
373 373 try:
374 374 self.changelog.rev(node)
375 375 return node
376 376 except error.LookupError:
377 377 if not warned[0]:
378 378 warned[0] = True
379 379 self.ui.warn(_("warning: ignoring unknown"
380 380 " working parent %s!\n") % short(node))
381 381 return nullid
382 382
383 383 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
384 384
385 385 def __getitem__(self, changeid):
386 386 if changeid is None:
387 387 return context.workingctx(self)
388 388 return context.changectx(self, changeid)
389 389
390 390 def __contains__(self, changeid):
391 391 try:
392 392 return bool(self.lookup(changeid))
393 393 except error.RepoLookupError:
394 394 return False
395 395
396 396 def __nonzero__(self):
397 397 return True
398 398
399 399 def __len__(self):
400 400 return len(self.changelog)
401 401
402 402 def __iter__(self):
403 403 return iter(self.changelog)
404 404
405 405 def revs(self, expr, *args):
406 406 '''Return a list of revisions matching the given revset'''
407 407 expr = revset.formatspec(expr, *args)
408 408 m = revset.match(None, expr)
409 409 return [r for r in m(self, list(self))]
410 410
411 411 def set(self, expr, *args):
412 412 '''
413 413 Yield a context for each matching revision, after doing arg
414 414 replacement via revset.formatspec
415 415 '''
416 416 for r in self.revs(expr, *args):
417 417 yield self[r]
418 418
419 419 def url(self):
420 420 return 'file:' + self.root
421 421
422 422 def hook(self, name, throw=False, **args):
423 423 return hook.hook(self.ui, self, name, throw, **args)
424 424
425 425 @unfilteredmethod
426 426 def _tag(self, names, node, message, local, user, date, extra={}):
427 427 if isinstance(names, str):
428 428 names = (names,)
429 429
430 430 branches = self.branchmap()
431 431 for name in names:
432 432 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 433 local=local)
434 434 if name in branches:
435 435 self.ui.warn(_("warning: tag %s conflicts with existing"
436 436 " branch name\n") % name)
437 437
438 438 def writetags(fp, names, munge, prevtags):
439 439 fp.seek(0, 2)
440 440 if prevtags and prevtags[-1] != '\n':
441 441 fp.write('\n')
442 442 for name in names:
443 443 m = munge and munge(name) or name
444 444 if (self._tagscache.tagtypes and
445 445 name in self._tagscache.tagtypes):
446 446 old = self.tags().get(name, nullid)
447 447 fp.write('%s %s\n' % (hex(old), m))
448 448 fp.write('%s %s\n' % (hex(node), m))
449 449 fp.close()
450 450
451 451 prevtags = ''
452 452 if local:
453 453 try:
454 454 fp = self.opener('localtags', 'r+')
455 455 except IOError:
456 456 fp = self.opener('localtags', 'a')
457 457 else:
458 458 prevtags = fp.read()
459 459
460 460 # local tags are stored in the current charset
461 461 writetags(fp, names, None, prevtags)
462 462 for name in names:
463 463 self.hook('tag', node=hex(node), tag=name, local=local)
464 464 return
465 465
466 466 try:
467 467 fp = self.wfile('.hgtags', 'rb+')
468 468 except IOError, e:
469 469 if e.errno != errno.ENOENT:
470 470 raise
471 471 fp = self.wfile('.hgtags', 'ab')
472 472 else:
473 473 prevtags = fp.read()
474 474
475 475 # committed tags are stored in UTF-8
476 476 writetags(fp, names, encoding.fromlocal, prevtags)
477 477
478 478 fp.close()
479 479
480 480 self.invalidatecaches()
481 481
482 482 if '.hgtags' not in self.dirstate:
483 483 self[None].add(['.hgtags'])
484 484
485 485 m = matchmod.exact(self.root, '', ['.hgtags'])
486 486 tagnode = self.commit(message, user, date, extra=extra, match=m)
487 487
488 488 for name in names:
489 489 self.hook('tag', node=hex(node), tag=name, local=local)
490 490
491 491 return tagnode
492 492
493 493 def tag(self, names, node, message, local, user, date):
494 494 '''tag a revision with one or more symbolic names.
495 495
496 496 names is a list of strings or, when adding a single tag, names may be a
497 497 string.
498 498
499 499 if local is True, the tags are stored in a per-repository file.
500 500 otherwise, they are stored in the .hgtags file, and a new
501 501 changeset is committed with the change.
502 502
503 503 keyword arguments:
504 504
505 505 local: whether to store tags in non-version-controlled file
506 506 (default False)
507 507
508 508 message: commit message to use if committing
509 509
510 510 user: name of user to use if committing
511 511
512 512 date: date tuple to use if committing'''
513 513
514 514 if not local:
515 515 for x in self.status()[:5]:
516 516 if '.hgtags' in x:
517 517 raise util.Abort(_('working copy of .hgtags is changed '
518 518 '(please commit .hgtags manually)'))
519 519
520 520 self.tags() # instantiate the cache
521 521 self._tag(names, node, message, local, user, date)
522 522
523 523 @filteredpropertycache
524 524 def _tagscache(self):
525 525 '''Returns a tagscache object that contains various tag-related
526 526 caches.'''
527 527
528 528 # This simplifies its cache management by having one decorated
529 529 # function (this one) and the rest simply fetch things from it.
530 530 class tagscache(object):
531 531 def __init__(self):
532 532 # These two define the set of tags for this repository. tags
533 533 # maps tag name to node; tagtypes maps tag name to 'global' or
534 534 # 'local'. (Global tags are defined by .hgtags across all
535 535 # heads, and local tags are defined in .hg/localtags.)
536 536 # They constitute the in-memory cache of tags.
537 537 self.tags = self.tagtypes = None
538 538
539 539 self.nodetagscache = self.tagslist = None
540 540
541 541 cache = tagscache()
542 542 cache.tags, cache.tagtypes = self._findtags()
543 543
544 544 return cache
545 545
546 546 def tags(self):
547 547 '''return a mapping of tag to node'''
548 548 t = {}
549 549 if self.changelog.filteredrevs:
550 550 tags, tt = self._findtags()
551 551 else:
552 552 tags = self._tagscache.tags
553 553 for k, v in tags.iteritems():
554 554 try:
555 555 # ignore tags to unknown nodes
556 556 self.changelog.rev(v)
557 557 t[k] = v
558 558 except (error.LookupError, ValueError):
559 559 pass
560 560 return t
561 561
562 562 def _findtags(self):
563 563 '''Do the hard work of finding tags. Return a pair of dicts
564 564 (tags, tagtypes) where tags maps tag name to node, and tagtypes
565 565 maps tag name to a string like \'global\' or \'local\'.
566 566 Subclasses or extensions are free to add their own tags, but
567 567 should be aware that the returned dicts will be retained for the
568 568 duration of the localrepo object.'''
569 569
570 570 # XXX what tagtype should subclasses/extensions use? Currently
571 571 # mq and bookmarks add tags, but do not set the tagtype at all.
572 572 # Should each extension invent its own tag type? Should there
573 573 # be one tagtype for all such "virtual" tags? Or is the status
574 574 # quo fine?
575 575
576 576 alltags = {} # map tag name to (node, hist)
577 577 tagtypes = {}
578 578
579 579 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
580 580 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
581 581
582 582 # Build the return dicts. Have to re-encode tag names because
583 583 # the tags module always uses UTF-8 (in order not to lose info
584 584 # writing to the cache), but the rest of Mercurial wants them in
585 585 # local encoding.
586 586 tags = {}
587 587 for (name, (node, hist)) in alltags.iteritems():
588 588 if node != nullid:
589 589 tags[encoding.tolocal(name)] = node
590 590 tags['tip'] = self.changelog.tip()
591 591 tagtypes = dict([(encoding.tolocal(name), value)
592 592 for (name, value) in tagtypes.iteritems()])
593 593 return (tags, tagtypes)
594 594
595 595 def tagtype(self, tagname):
596 596 '''
597 597 return the type of the given tag. result can be:
598 598
599 599 'local' : a local tag
600 600 'global' : a global tag
601 601 None : tag does not exist
602 602 '''
603 603
604 604 return self._tagscache.tagtypes.get(tagname)
605 605
606 606 def tagslist(self):
607 607 '''return a list of tags ordered by revision'''
608 608 if not self._tagscache.tagslist:
609 609 l = []
610 610 for t, n in self.tags().iteritems():
611 611 r = self.changelog.rev(n)
612 612 l.append((r, t, n))
613 613 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
614 614
615 615 return self._tagscache.tagslist
616 616
617 617 def nodetags(self, node):
618 618 '''return the tags associated with a node'''
619 619 if not self._tagscache.nodetagscache:
620 620 nodetagscache = {}
621 621 for t, n in self._tagscache.tags.iteritems():
622 622 nodetagscache.setdefault(n, []).append(t)
623 623 for tags in nodetagscache.itervalues():
624 624 tags.sort()
625 625 self._tagscache.nodetagscache = nodetagscache
626 626 return self._tagscache.nodetagscache.get(node, [])
627 627
628 628 def nodebookmarks(self, node):
629 629 marks = []
630 630 for bookmark, n in self._bookmarks.iteritems():
631 631 if n == node:
632 632 marks.append(bookmark)
633 633 return sorted(marks)
634 634
635 635 def branchmap(self):
636 636 '''returns a dictionary {branch: [branchheads]}'''
637 637 branchmap.updatecache(self)
638 638 return self._branchcaches[self.filtername]
639 639
640 640
641 641 def _branchtip(self, heads):
642 642 '''return the tipmost branch head in heads'''
643 643 tip = heads[-1]
644 644 for h in reversed(heads):
645 645 if not self[h].closesbranch():
646 646 tip = h
647 647 break
648 648 return tip
649 649
650 650 def branchtip(self, branch):
651 651 '''return the tip node for a given branch'''
652 652 if branch not in self.branchmap():
653 653 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
654 654 return self._branchtip(self.branchmap()[branch])
655 655
656 656 def branchtags(self):
657 657 '''return a dict where branch names map to the tipmost head of
658 658 the branch, open heads come before closed'''
659 659 bt = {}
660 660 for bn, heads in self.branchmap().iteritems():
661 661 bt[bn] = self._branchtip(heads)
662 662 return bt
663 663
664 664 def lookup(self, key):
665 665 return self[key].node()
666 666
667 667 def lookupbranch(self, key, remote=None):
668 668 repo = remote or self
669 669 if key in repo.branchmap():
670 670 return key
671 671
672 672 repo = (remote and remote.local()) and remote or self
673 673 return repo[key].branch()
674 674
675 675 def known(self, nodes):
676 676 nm = self.changelog.nodemap
677 677 pc = self._phasecache
678 678 result = []
679 679 for n in nodes:
680 680 r = nm.get(n)
681 681 resp = not (r is None or pc.phase(self, r) >= phases.secret)
682 682 result.append(resp)
683 683 return result
684 684
685 685 def local(self):
686 686 return self
687 687
688 688 def cancopy(self):
689 689 return self.local() # so statichttprepo's override of local() works
690 690
691 691 def join(self, f):
692 692 return os.path.join(self.path, f)
693 693
694 694 def wjoin(self, f):
695 695 return os.path.join(self.root, f)
696 696
697 697 def file(self, f):
698 698 if f[0] == '/':
699 699 f = f[1:]
700 700 return filelog.filelog(self.sopener, f)
701 701
702 702 def changectx(self, changeid):
703 703 return self[changeid]
704 704
705 705 def parents(self, changeid=None):
706 706 '''get list of changectxs for parents of changeid'''
707 707 return self[changeid].parents()
708 708
709 709 def setparents(self, p1, p2=nullid):
710 710 copies = self.dirstate.setparents(p1, p2)
711 711 pctx = self[p1]
712 712 if copies:
713 713 # Adjust copy records, the dirstate cannot do it, it
714 714 # requires access to parents manifests. Preserve them
715 715 # only for entries added to first parent.
716 716 for f in copies:
717 717 if f not in pctx and copies[f] in pctx:
718 718 self.dirstate.copy(copies[f], f)
719 719 if p2 == nullid:
720 720 for f, s in sorted(self.dirstate.copies().items()):
721 721 if f not in pctx and s not in pctx:
722 722 self.dirstate.copy(None, f)
723 723
724 724 def filectx(self, path, changeid=None, fileid=None):
725 725 """changeid can be a changeset revision, node, or tag.
726 726 fileid can be a file revision or node."""
727 727 return context.filectx(self, path, changeid, fileid)
728 728
729 729 def getcwd(self):
730 730 return self.dirstate.getcwd()
731 731
732 732 def pathto(self, f, cwd=None):
733 733 return self.dirstate.pathto(f, cwd)
734 734
735 735 def wfile(self, f, mode='r'):
736 736 return self.wopener(f, mode)
737 737
738 738 def _link(self, f):
739 739 return self.wvfs.islink(f)
740 740
741 741 def _loadfilter(self, filter):
742 742 if filter not in self.filterpats:
743 743 l = []
744 744 for pat, cmd in self.ui.configitems(filter):
745 745 if cmd == '!':
746 746 continue
747 747 mf = matchmod.match(self.root, '', [pat])
748 748 fn = None
749 749 params = cmd
750 750 for name, filterfn in self._datafilters.iteritems():
751 751 if cmd.startswith(name):
752 752 fn = filterfn
753 753 params = cmd[len(name):].lstrip()
754 754 break
755 755 if not fn:
756 756 fn = lambda s, c, **kwargs: util.filter(s, c)
757 757 # Wrap old filters not supporting keyword arguments
758 758 if not inspect.getargspec(fn)[2]:
759 759 oldfn = fn
760 760 fn = lambda s, c, **kwargs: oldfn(s, c)
761 761 l.append((mf, fn, params))
762 762 self.filterpats[filter] = l
763 763 return self.filterpats[filter]
764 764
765 765 def _filter(self, filterpats, filename, data):
766 766 for mf, fn, cmd in filterpats:
767 767 if mf(filename):
768 768 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
769 769 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
770 770 break
771 771
772 772 return data
773 773
774 774 @unfilteredpropertycache
775 775 def _encodefilterpats(self):
776 776 return self._loadfilter('encode')
777 777
778 778 @unfilteredpropertycache
779 779 def _decodefilterpats(self):
780 780 return self._loadfilter('decode')
781 781
782 782 def adddatafilter(self, name, filter):
783 783 self._datafilters[name] = filter
784 784
785 785 def wread(self, filename):
786 786 if self._link(filename):
787 787 data = self.wvfs.readlink(filename)
788 788 else:
789 789 data = self.wopener.read(filename)
790 790 return self._filter(self._encodefilterpats, filename, data)
791 791
792 792 def wwrite(self, filename, data, flags):
793 793 data = self._filter(self._decodefilterpats, filename, data)
794 794 if 'l' in flags:
795 795 self.wopener.symlink(data, filename)
796 796 else:
797 797 self.wopener.write(filename, data)
798 798 if 'x' in flags:
799 799 self.wvfs.setflags(filename, False, True)
800 800
801 801 def wwritedata(self, filename, data):
802 802 return self._filter(self._decodefilterpats, filename, data)
803 803
804 804 def transaction(self, desc):
805 805 tr = self._transref and self._transref() or None
806 806 if tr and tr.running():
807 807 return tr.nest()
808 808
809 809 # abort here if the journal already exists
810 810 if self.svfs.exists("journal"):
811 811 raise error.RepoError(
812 812 _("abandoned transaction found - run hg recover"))
813 813
814 814 self._writejournal(desc)
815 815 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
816 816
817 817 tr = transaction.transaction(self.ui.warn, self.sopener,
818 818 self.sjoin("journal"),
819 819 aftertrans(renames),
820 820 self.store.createmode)
821 821 self._transref = weakref.ref(tr)
822 822 return tr
823 823
824 824 def _journalfiles(self):
825 825 return ((self.svfs, 'journal'),
826 826 (self.vfs, 'journal.dirstate'),
827 827 (self.vfs, 'journal.branch'),
828 828 (self.vfs, 'journal.desc'),
829 829 (self.vfs, 'journal.bookmarks'),
830 830 (self.svfs, 'journal.phaseroots'))
831 831
832 832 def undofiles(self):
833 833 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
834 834
835 835 def _writejournal(self, desc):
836 836 self.opener.write("journal.dirstate",
837 837 self.opener.tryread("dirstate"))
838 838 self.opener.write("journal.branch",
839 839 encoding.fromlocal(self.dirstate.branch()))
840 840 self.opener.write("journal.desc",
841 841 "%d\n%s\n" % (len(self), desc))
842 842 self.opener.write("journal.bookmarks",
843 843 self.opener.tryread("bookmarks"))
844 844 self.sopener.write("journal.phaseroots",
845 845 self.sopener.tryread("phaseroots"))
846 846
847 847 def recover(self):
848 848 lock = self.lock()
849 849 try:
850 850 if self.svfs.exists("journal"):
851 851 self.ui.status(_("rolling back interrupted transaction\n"))
852 852 transaction.rollback(self.sopener, self.sjoin("journal"),
853 853 self.ui.warn)
854 854 self.invalidate()
855 855 return True
856 856 else:
857 857 self.ui.warn(_("no interrupted transaction available\n"))
858 858 return False
859 859 finally:
860 860 lock.release()
861 861
862 862 def rollback(self, dryrun=False, force=False):
863 863 wlock = lock = None
864 864 try:
865 865 wlock = self.wlock()
866 866 lock = self.lock()
867 867 if self.svfs.exists("undo"):
868 868 return self._rollback(dryrun, force)
869 869 else:
870 870 self.ui.warn(_("no rollback information available\n"))
871 871 return 1
872 872 finally:
873 873 release(lock, wlock)
874 874
875 875 @unfilteredmethod # Until we get smarter cache management
876 876 def _rollback(self, dryrun, force):
877 877 ui = self.ui
878 878 try:
879 879 args = self.opener.read('undo.desc').splitlines()
880 880 (oldlen, desc, detail) = (int(args[0]), args[1], None)
881 881 if len(args) >= 3:
882 882 detail = args[2]
883 883 oldtip = oldlen - 1
884 884
885 885 if detail and ui.verbose:
886 886 msg = (_('repository tip rolled back to revision %s'
887 887 ' (undo %s: %s)\n')
888 888 % (oldtip, desc, detail))
889 889 else:
890 890 msg = (_('repository tip rolled back to revision %s'
891 891 ' (undo %s)\n')
892 892 % (oldtip, desc))
893 893 except IOError:
894 894 msg = _('rolling back unknown transaction\n')
895 895 desc = None
896 896
897 897 if not force and self['.'] != self['tip'] and desc == 'commit':
898 898 raise util.Abort(
899 899 _('rollback of last commit while not checked out '
900 900 'may lose data'), hint=_('use -f to force'))
901 901
902 902 ui.status(msg)
903 903 if dryrun:
904 904 return 0
905 905
906 906 parents = self.dirstate.parents()
907 907 self.destroying()
908 908 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
909 909 if self.vfs.exists('undo.bookmarks'):
910 910 self.vfs.rename('undo.bookmarks', 'bookmarks')
911 911 if self.svfs.exists('undo.phaseroots'):
912 912 self.svfs.rename('undo.phaseroots', 'phaseroots')
913 913 self.invalidate()
914 914
915 915 parentgone = (parents[0] not in self.changelog.nodemap or
916 916 parents[1] not in self.changelog.nodemap)
917 917 if parentgone:
918 918 self.vfs.rename('undo.dirstate', 'dirstate')
919 919 try:
920 920 branch = self.opener.read('undo.branch')
921 921 self.dirstate.setbranch(encoding.tolocal(branch))
922 922 except IOError:
923 923 ui.warn(_('named branch could not be reset: '
924 924 'current branch is still \'%s\'\n')
925 925 % self.dirstate.branch())
926 926
927 927 self.dirstate.invalidate()
928 928 parents = tuple([p.rev() for p in self.parents()])
929 929 if len(parents) > 1:
930 930 ui.status(_('working directory now based on '
931 931 'revisions %d and %d\n') % parents)
932 932 else:
933 933 ui.status(_('working directory now based on '
934 934 'revision %d\n') % parents)
935 935 # TODO: if we know which new heads may result from this rollback, pass
936 936 # them to destroy(), which will prevent the branchhead cache from being
937 937 # invalidated.
938 938 self.destroyed()
939 939 return 0
940 940
941 941 def invalidatecaches(self):
942 942
943 943 if '_tagscache' in vars(self):
944 944 # can't use delattr on proxy
945 945 del self.__dict__['_tagscache']
946 946
947 947 self.unfiltered()._branchcaches.clear()
948 948 self.invalidatevolatilesets()
949 949
950 950 def invalidatevolatilesets(self):
951 951 self.filteredrevcache.clear()
952 952 obsolete.clearobscaches(self)
953 953
954 954 def invalidatedirstate(self):
955 955 '''Invalidates the dirstate, causing the next call to dirstate
956 956 to check if it was modified since the last time it was read,
957 957 rereading it if it has.
958 958
959 959 This is different from dirstate.invalidate() in that it doesn't
960 960 always reread the dirstate. Use dirstate.invalidate() if you want to
961 961 explicitly read the dirstate again (i.e. restoring it to a previous
962 962 known good state).'''
963 963 if hasunfilteredcache(self, 'dirstate'):
964 964 for k in self.dirstate._filecache:
965 965 try:
966 966 delattr(self.dirstate, k)
967 967 except AttributeError:
968 968 pass
969 969 delattr(self.unfiltered(), 'dirstate')
970 970
971 971 def invalidate(self):
972 972 unfiltered = self.unfiltered() # all file caches are stored unfiltered
973 973 for k in self._filecache:
974 974 # dirstate is invalidated separately in invalidatedirstate()
975 975 if k == 'dirstate':
976 976 continue
977 977
978 978 try:
979 979 delattr(unfiltered, k)
980 980 except AttributeError:
981 981 pass
982 982 self.invalidatecaches()
983 983
984 984 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
985 985 try:
986 986 l = lock.lock(lockname, 0, releasefn, desc=desc)
987 987 except error.LockHeld, inst:
988 988 if not wait:
989 989 raise
990 990 self.ui.warn(_("waiting for lock on %s held by %r\n") %
991 991 (desc, inst.locker))
992 992 # default to 600 seconds timeout
993 993 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
994 994 releasefn, desc=desc)
995 995 if acquirefn:
996 996 acquirefn()
997 997 return l
998 998
999 999 def _afterlock(self, callback):
1000 1000 """add a callback to the current repository lock.
1001 1001
1002 1002 The callback will be executed on lock release."""
1003 1003 l = self._lockref and self._lockref()
1004 1004 if l:
1005 1005 l.postrelease.append(callback)
1006 1006 else:
1007 1007 callback()
1008 1008
1009 1009 def lock(self, wait=True):
1010 1010 '''Lock the repository store (.hg/store) and return a weak reference
1011 1011 to the lock. Use this before modifying the store (e.g. committing or
1012 1012 stripping). If you are opening a transaction, get a lock as well.'''
1013 1013 l = self._lockref and self._lockref()
1014 1014 if l is not None and l.held:
1015 1015 l.lock()
1016 1016 return l
1017 1017
1018 1018 def unlock():
1019 1019 self.store.write()
1020 1020 if hasunfilteredcache(self, '_phasecache'):
1021 1021 self._phasecache.write()
1022 1022 for k, ce in self._filecache.items():
1023 1023 if k == 'dirstate' or k not in self.__dict__:
1024 1024 continue
1025 1025 ce.refresh()
1026 1026
1027 1027 l = self._lock(self.sjoin("lock"), wait, unlock,
1028 1028 self.invalidate, _('repository %s') % self.origroot)
1029 1029 self._lockref = weakref.ref(l)
1030 1030 return l
1031 1031
1032 1032 def wlock(self, wait=True):
1033 1033 '''Lock the non-store parts of the repository (everything under
1034 1034 .hg except .hg/store) and return a weak reference to the lock.
1035 1035 Use this before modifying files in .hg.'''
1036 1036 l = self._wlockref and self._wlockref()
1037 1037 if l is not None and l.held:
1038 1038 l.lock()
1039 1039 return l
1040 1040
1041 1041 def unlock():
1042 1042 self.dirstate.write()
1043 1043 self._filecache['dirstate'].refresh()
1044 1044
1045 1045 l = self._lock(self.join("wlock"), wait, unlock,
1046 1046 self.invalidatedirstate, _('working directory of %s') %
1047 1047 self.origroot)
1048 1048 self._wlockref = weakref.ref(l)
1049 1049 return l
1050 1050
1051 1051 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1052 1052 """
1053 1053 commit an individual file as part of a larger transaction
1054 1054 """
1055 1055
1056 1056 fname = fctx.path()
1057 1057 text = fctx.data()
1058 1058 flog = self.file(fname)
1059 1059 fparent1 = manifest1.get(fname, nullid)
1060 1060 fparent2 = fparent2o = manifest2.get(fname, nullid)
1061 1061
1062 1062 meta = {}
1063 1063 copy = fctx.renamed()
1064 1064 if copy and copy[0] != fname:
1065 1065 # Mark the new revision of this file as a copy of another
1066 1066 # file. This copy data will effectively act as a parent
1067 1067 # of this new revision. If this is a merge, the first
1068 1068 # parent will be the nullid (meaning "look up the copy data")
1069 1069 # and the second one will be the other parent. For example:
1070 1070 #
1071 1071 # 0 --- 1 --- 3 rev1 changes file foo
1072 1072 # \ / rev2 renames foo to bar and changes it
1073 1073 # \- 2 -/ rev3 should have bar with all changes and
1074 1074 # should record that bar descends from
1075 1075 # bar in rev2 and foo in rev1
1076 1076 #
1077 1077 # this allows this merge to succeed:
1078 1078 #
1079 1079 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1080 1080 # \ / merging rev3 and rev4 should use bar@rev2
1081 1081 # \- 2 --- 4 as the merge base
1082 1082 #
1083 1083
1084 1084 cfname = copy[0]
1085 1085 crev = manifest1.get(cfname)
1086 1086 newfparent = fparent2
1087 1087
1088 1088 if manifest2: # branch merge
1089 1089 if fparent2 == nullid or crev is None: # copied on remote side
1090 1090 if cfname in manifest2:
1091 1091 crev = manifest2[cfname]
1092 1092 newfparent = fparent1
1093 1093
1094 1094 # find source in nearest ancestor if we've lost track
1095 1095 if not crev:
1096 1096 self.ui.debug(" %s: searching for copy revision for %s\n" %
1097 1097 (fname, cfname))
1098 1098 for ancestor in self[None].ancestors():
1099 1099 if cfname in ancestor:
1100 1100 crev = ancestor[cfname].filenode()
1101 1101 break
1102 1102
1103 1103 if crev:
1104 1104 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1105 1105 meta["copy"] = cfname
1106 1106 meta["copyrev"] = hex(crev)
1107 1107 fparent1, fparent2 = nullid, newfparent
1108 1108 else:
1109 1109 self.ui.warn(_("warning: can't find ancestor for '%s' "
1110 1110 "copied from '%s'!\n") % (fname, cfname))
1111 1111
1112 1112 elif fparent2 != nullid:
1113 1113 # is one parent an ancestor of the other?
1114 1114 fparentancestor = flog.ancestor(fparent1, fparent2)
1115 1115 if fparentancestor == fparent1:
1116 1116 fparent1, fparent2 = fparent2, nullid
1117 1117 elif fparentancestor == fparent2:
1118 1118 fparent2 = nullid
1119 1119
1120 1120 # is the file changed?
1121 1121 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1122 1122 changelist.append(fname)
1123 1123 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1124 1124
1125 1125 # are just the flags changed during merge?
1126 1126 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1127 1127 changelist.append(fname)
1128 1128
1129 1129 return fparent1
1130 1130
1131 1131 @unfilteredmethod
1132 1132 def commit(self, text="", user=None, date=None, match=None, force=False,
1133 1133 editor=False, extra={}):
1134 1134 """Add a new revision to current repository.
1135 1135
1136 1136 Revision information is gathered from the working directory,
1137 1137 match can be used to filter the committed files. If editor is
1138 1138 supplied, it is called to get a commit message.
1139 1139 """
1140 1140
1141 1141 def fail(f, msg):
1142 1142 raise util.Abort('%s: %s' % (f, msg))
1143 1143
1144 1144 if not match:
1145 1145 match = matchmod.always(self.root, '')
1146 1146
1147 1147 if not force:
1148 1148 vdirs = []
1149 1149 match.explicitdir = vdirs.append
1150 1150 match.bad = fail
1151 1151
1152 1152 wlock = self.wlock()
1153 1153 try:
1154 1154 wctx = self[None]
1155 1155 merge = len(wctx.parents()) > 1
1156 1156
1157 1157 if (not force and merge and match and
1158 1158 (match.files() or match.anypats())):
1159 1159 raise util.Abort(_('cannot partially commit a merge '
1160 1160 '(do not specify files or patterns)'))
1161 1161
1162 1162 changes = self.status(match=match, clean=force)
1163 1163 if force:
1164 1164 changes[0].extend(changes[6]) # mq may commit unchanged files
1165 1165
1166 1166 # check subrepos
1167 1167 subs = []
1168 1168 commitsubs = set()
1169 1169 newstate = wctx.substate.copy()
1170 1170 # only manage subrepos and .hgsubstate if .hgsub is present
1171 1171 if '.hgsub' in wctx:
1172 1172 # we'll decide whether to track this ourselves, thanks
1173 1173 if '.hgsubstate' in changes[0]:
1174 1174 changes[0].remove('.hgsubstate')
1175 1175 if '.hgsubstate' in changes[2]:
1176 1176 changes[2].remove('.hgsubstate')
1177 1177
1178 1178 # compare current state to last committed state
1179 1179 # build new substate based on last committed state
1180 1180 oldstate = wctx.p1().substate
1181 1181 for s in sorted(newstate.keys()):
1182 1182 if not match(s):
1183 1183 # ignore working copy, use old state if present
1184 1184 if s in oldstate:
1185 1185 newstate[s] = oldstate[s]
1186 1186 continue
1187 1187 if not force:
1188 1188 raise util.Abort(
1189 1189 _("commit with new subrepo %s excluded") % s)
1190 1190 if wctx.sub(s).dirty(True):
1191 1191 if not self.ui.configbool('ui', 'commitsubrepos'):
1192 1192 raise util.Abort(
1193 1193 _("uncommitted changes in subrepo %s") % s,
1194 1194 hint=_("use --subrepos for recursive commit"))
1195 1195 subs.append(s)
1196 1196 commitsubs.add(s)
1197 1197 else:
1198 1198 bs = wctx.sub(s).basestate()
1199 1199 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1200 1200 if oldstate.get(s, (None, None, None))[1] != bs:
1201 1201 subs.append(s)
1202 1202
1203 1203 # check for removed subrepos
1204 1204 for p in wctx.parents():
1205 1205 r = [s for s in p.substate if s not in newstate]
1206 1206 subs += [s for s in r if match(s)]
1207 1207 if subs:
1208 1208 if (not match('.hgsub') and
1209 1209 '.hgsub' in (wctx.modified() + wctx.added())):
1210 1210 raise util.Abort(
1211 1211 _("can't commit subrepos without .hgsub"))
1212 1212 changes[0].insert(0, '.hgsubstate')
1213 1213
1214 1214 elif '.hgsub' in changes[2]:
1215 1215 # clean up .hgsubstate when .hgsub is removed
1216 1216 if ('.hgsubstate' in wctx and
1217 1217 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1218 1218 changes[2].insert(0, '.hgsubstate')
1219 1219
1220 1220 # make sure all explicit patterns are matched
1221 1221 if not force and match.files():
1222 1222 matched = set(changes[0] + changes[1] + changes[2])
1223 1223
1224 1224 for f in match.files():
1225 1225 f = self.dirstate.normalize(f)
1226 1226 if f == '.' or f in matched or f in wctx.substate:
1227 1227 continue
1228 1228 if f in changes[3]: # missing
1229 1229 fail(f, _('file not found!'))
1230 1230 if f in vdirs: # visited directory
1231 1231 d = f + '/'
1232 1232 for mf in matched:
1233 1233 if mf.startswith(d):
1234 1234 break
1235 1235 else:
1236 1236 fail(f, _("no match under directory!"))
1237 1237 elif f not in self.dirstate:
1238 1238 fail(f, _("file not tracked!"))
1239 1239
1240 1240 cctx = context.workingctx(self, text, user, date, extra, changes)
1241 1241
1242 1242 if (not force and not extra.get("close") and not merge
1243 1243 and not cctx.files()
1244 1244 and wctx.branch() == wctx.p1().branch()):
1245 1245 return None
1246 1246
1247 1247 if merge and cctx.deleted():
1248 1248 raise util.Abort(_("cannot commit merge with missing files"))
1249 1249
1250 1250 ms = mergemod.mergestate(self)
1251 1251 for f in changes[0]:
1252 1252 if f in ms and ms[f] == 'u':
1253 1253 raise util.Abort(_("unresolved merge conflicts "
1254 1254 "(see hg help resolve)"))
1255 1255
1256 1256 if editor:
1257 1257 cctx._text = editor(self, cctx, subs)
1258 1258 edited = (text != cctx._text)
1259 1259
1260 1260 # commit subs and write new state
1261 1261 if subs:
1262 1262 for s in sorted(commitsubs):
1263 1263 sub = wctx.sub(s)
1264 1264 self.ui.status(_('committing subrepository %s\n') %
1265 1265 subrepo.subrelpath(sub))
1266 1266 sr = sub.commit(cctx._text, user, date)
1267 1267 newstate[s] = (newstate[s][0], sr)
1268 1268 subrepo.writestate(self, newstate)
1269 1269
1270 1270 # Save commit message in case this transaction gets rolled back
1271 1271 # (e.g. by a pretxncommit hook). Leave the content alone on
1272 1272 # the assumption that the user will use the same editor again.
1273 1273 msgfn = self.savecommitmessage(cctx._text)
1274 1274
1275 1275 p1, p2 = self.dirstate.parents()
1276 1276 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1277 1277 try:
1278 1278 self.hook("precommit", throw=True, parent1=hookp1,
1279 1279 parent2=hookp2)
1280 1280 ret = self.commitctx(cctx, True)
1281 1281 except: # re-raises
1282 1282 if edited:
1283 1283 self.ui.write(
1284 1284 _('note: commit message saved in %s\n') % msgfn)
1285 1285 raise
1286 1286
1287 1287 # update bookmarks, dirstate and mergestate
1288 1288 bookmarks.update(self, [p1, p2], ret)
1289 1289 cctx.markcommitted(ret)
1290 1290 ms.reset()
1291 1291 finally:
1292 1292 wlock.release()
1293 1293
1294 1294 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1295 1295 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1296 1296 self._afterlock(commithook)
1297 1297 return ret
1298 1298
1299 1299 @unfilteredmethod
1300 1300 def commitctx(self, ctx, error=False):
1301 1301 """Add a new revision to current repository.
1302 1302 Revision information is passed via the context argument.
1303 1303 """
1304 1304
1305 1305 tr = lock = None
1306 1306 removed = list(ctx.removed())
1307 1307 p1, p2 = ctx.p1(), ctx.p2()
1308 1308 user = ctx.user()
1309 1309
1310 1310 lock = self.lock()
1311 1311 try:
1312 1312 tr = self.transaction("commit")
1313 1313 trp = weakref.proxy(tr)
1314 1314
1315 1315 if ctx.files():
1316 1316 m1 = p1.manifest().copy()
1317 1317 m2 = p2.manifest()
1318 1318
1319 1319 # check in files
1320 1320 new = {}
1321 1321 changed = []
1322 1322 linkrev = len(self)
1323 1323 for f in sorted(ctx.modified() + ctx.added()):
1324 1324 self.ui.note(f + "\n")
1325 1325 try:
1326 1326 fctx = ctx[f]
1327 1327 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1328 1328 changed)
1329 1329 m1.set(f, fctx.flags())
1330 1330 except OSError, inst:
1331 1331 self.ui.warn(_("trouble committing %s!\n") % f)
1332 1332 raise
1333 1333 except IOError, inst:
1334 1334 errcode = getattr(inst, 'errno', errno.ENOENT)
1335 1335 if error or errcode and errcode != errno.ENOENT:
1336 1336 self.ui.warn(_("trouble committing %s!\n") % f)
1337 1337 raise
1338 1338 else:
1339 1339 removed.append(f)
1340 1340
1341 1341 # update manifest
1342 1342 m1.update(new)
1343 1343 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1344 1344 drop = [f for f in removed if f in m1]
1345 1345 for f in drop:
1346 1346 del m1[f]
1347 1347 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1348 1348 p2.manifestnode(), (new, drop))
1349 1349 files = changed + removed
1350 1350 else:
1351 1351 mn = p1.manifestnode()
1352 1352 files = []
1353 1353
1354 1354 # update changelog
1355 1355 self.changelog.delayupdate()
1356 1356 n = self.changelog.add(mn, files, ctx.description(),
1357 1357 trp, p1.node(), p2.node(),
1358 1358 user, ctx.date(), ctx.extra().copy())
1359 1359 p = lambda: self.changelog.writepending() and self.root or ""
1360 1360 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1361 1361 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1362 1362 parent2=xp2, pending=p)
1363 1363 self.changelog.finalize(trp)
1364 1364 # set the new commit in its proper phase
1365 1365 targetphase = phases.newcommitphase(self.ui)
1366 1366 if targetphase:
1367 1367 # retracting the boundary does not alter parent changesets.
1368 1368 # if a parent has a higher phase, the resulting phase will
1369 1369 # be compliant anyway
1370 1370 #
1371 1371 # if minimal phase was 0 we don't need to retract anything
1372 1372 phases.retractboundary(self, targetphase, [n])
1373 1373 tr.close()
1374 1374 branchmap.updatecache(self.filtered('served'))
1375 1375 return n
1376 1376 finally:
1377 1377 if tr:
1378 1378 tr.release()
1379 1379 lock.release()
1380 1380
1381 1381 @unfilteredmethod
1382 1382 def destroying(self):
1383 1383 '''Inform the repository that nodes are about to be destroyed.
1384 1384 Intended for use by strip and rollback, so there's a common
1385 1385 place for anything that has to be done before destroying history.
1386 1386
1387 1387 This is mostly useful for saving state that is in memory and waiting
1388 1388 to be flushed when the current lock is released. Because a call to
1389 1389 destroyed is imminent, the repo will be invalidated causing those
1390 1390 changes to stay in memory (waiting for the next unlock), or vanish
1391 1391 completely.
1392 1392 '''
1393 1393 # When using the same lock to commit and strip, the phasecache is left
1394 1394 # dirty after committing. Then when we strip, the repo is invalidated,
1395 1395 # causing those changes to disappear.
1396 1396 if '_phasecache' in vars(self):
1397 1397 self._phasecache.write()
1398 1398
1399 1399 @unfilteredmethod
1400 1400 def destroyed(self):
1401 1401 '''Inform the repository that nodes have been destroyed.
1402 1402 Intended for use by strip and rollback, so there's a common
1403 1403 place for anything that has to be done after destroying history.
1404 1404 '''
1405 1405 # When one tries to:
1406 1406 # 1) destroy nodes thus calling this method (e.g. strip)
1407 1407 # 2) use phasecache somewhere (e.g. commit)
1408 1408 #
1409 1409 # then 2) will fail because the phasecache contains nodes that were
1410 1410 # removed. We can either remove phasecache from the filecache,
1411 1411 # causing it to reload next time it is accessed, or simply filter
1412 1412 # the removed nodes now and write the updated cache.
1413 1413 self._phasecache.filterunknown(self)
1414 1414 self._phasecache.write()
1415 1415
1416 1416 # update the 'served' branch cache to help read only server process
1417 1417 # Thanks to branchcache collaboration this is done from the nearest
1418 1418 # filtered subset and it is expected to be fast.
1419 1419 branchmap.updatecache(self.filtered('served'))
1420 1420
1421 1421 # Ensure the persistent tag cache is updated. Doing it now
1422 1422 # means that the tag cache only has to worry about destroyed
1423 1423 # heads immediately after a strip/rollback. That in turn
1424 1424 # guarantees that "cachetip == currenttip" (comparing both rev
1425 1425 # and node) always means no nodes have been added or destroyed.
1426 1426
1427 1427 # XXX this is suboptimal when qrefresh'ing: we strip the current
1428 1428 # head, refresh the tag cache, then immediately add a new head.
1429 1429 # But I think doing it this way is necessary for the "instant
1430 1430 # tag cache retrieval" case to work.
1431 1431 self.invalidate()
1432 1432
1433 1433 def walk(self, match, node=None):
1434 1434 '''
1435 1435 walk recursively through the directory tree or a given
1436 1436 changeset, finding all files matched by the match
1437 1437 function
1438 1438 '''
1439 1439 return self[node].walk(match)
1440 1440
1441 1441 def status(self, node1='.', node2=None, match=None,
1442 1442 ignored=False, clean=False, unknown=False,
1443 1443 listsubrepos=False):
1444 1444 """return status of files between two nodes or node and working
1445 1445 directory.
1446 1446
1447 1447 If node1 is None, use the first dirstate parent instead.
1448 1448 If node2 is None, compare node1 with working directory.
1449 1449 """
1450 1450
1451 1451 def mfmatches(ctx):
1452 1452 mf = ctx.manifest().copy()
1453 1453 if match.always():
1454 1454 return mf
1455 1455 for fn in mf.keys():
1456 1456 if not match(fn):
1457 1457 del mf[fn]
1458 1458 return mf
1459 1459
1460 if isinstance(node1, context.changectx):
1461 ctx1 = node1
1462 else:
1463 ctx1 = self[node1]
1464 if isinstance(node2, context.changectx):
1465 ctx2 = node2
1466 else:
1467 ctx2 = self[node2]
1460 ctx1 = self[node1]
1461 ctx2 = self[node2]
1468 1462
1469 1463 working = ctx2.rev() is None
1470 1464 parentworking = working and ctx1 == self['.']
1471 1465 match = match or matchmod.always(self.root, self.getcwd())
1472 1466 listignored, listclean, listunknown = ignored, clean, unknown
1473 1467
1474 1468 # load earliest manifest first for caching reasons
1475 1469 if not working and ctx2.rev() < ctx1.rev():
1476 1470 ctx2.manifest()
1477 1471
1478 1472 if not parentworking:
1479 1473 def bad(f, msg):
1480 1474 # 'f' may be a directory pattern from 'match.files()',
1481 1475 # so 'f not in ctx1' is not enough
1482 1476 if f not in ctx1 and f not in ctx1.dirs():
1483 1477 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1484 1478 match.bad = bad
1485 1479
1486 1480 if working: # we need to scan the working dir
1487 1481 subrepos = []
1488 1482 if '.hgsub' in self.dirstate:
1489 1483 subrepos = sorted(ctx2.substate)
1490 1484 s = self.dirstate.status(match, subrepos, listignored,
1491 1485 listclean, listunknown)
1492 1486 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1493 1487
1494 1488 # check for any possibly clean files
1495 1489 if parentworking and cmp:
1496 1490 fixup = []
1497 1491 # do a full compare of any files that might have changed
1498 1492 for f in sorted(cmp):
1499 1493 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1500 1494 or ctx1[f].cmp(ctx2[f])):
1501 1495 modified.append(f)
1502 1496 else:
1503 1497 fixup.append(f)
1504 1498
1505 1499 # update dirstate for files that are actually clean
1506 1500 if fixup:
1507 1501 if listclean:
1508 1502 clean += fixup
1509 1503
1510 1504 try:
1511 1505 # updating the dirstate is optional
1512 1506 # so we don't wait on the lock
1513 1507 wlock = self.wlock(False)
1514 1508 try:
1515 1509 for f in fixup:
1516 1510 self.dirstate.normal(f)
1517 1511 finally:
1518 1512 wlock.release()
1519 1513 except error.LockError:
1520 1514 pass
1521 1515
1522 1516 if not parentworking:
1523 1517 mf1 = mfmatches(ctx1)
1524 1518 if working:
1525 1519 # we are comparing working dir against non-parent
1526 1520 # generate a pseudo-manifest for the working dir
1527 1521 mf2 = mfmatches(self['.'])
1528 1522 for f in cmp + modified + added:
1529 1523 mf2[f] = None
1530 1524 mf2.set(f, ctx2.flags(f))
1531 1525 for f in removed:
1532 1526 if f in mf2:
1533 1527 del mf2[f]
1534 1528 else:
1535 1529 # we are comparing two revisions
1536 1530 deleted, unknown, ignored = [], [], []
1537 1531 mf2 = mfmatches(ctx2)
1538 1532
1539 1533 modified, added, clean = [], [], []
1540 1534 withflags = mf1.withflags() | mf2.withflags()
1541 1535 for fn, mf2node in mf2.iteritems():
1542 1536 if fn in mf1:
1543 1537 if (fn not in deleted and
1544 1538 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1545 1539 (mf1[fn] != mf2node and
1546 1540 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1547 1541 modified.append(fn)
1548 1542 elif listclean:
1549 1543 clean.append(fn)
1550 1544 del mf1[fn]
1551 1545 elif fn not in deleted:
1552 1546 added.append(fn)
1553 1547 removed = mf1.keys()
1554 1548
1555 1549 if working and modified and not self.dirstate._checklink:
1556 1550 # Symlink placeholders may get non-symlink-like contents
1557 1551 # via user error or dereferencing by NFS or Samba servers,
1558 1552 # so we filter out any placeholders that don't look like a
1559 1553 # symlink
1560 1554 sane = []
1561 1555 for f in modified:
1562 1556 if ctx2.flags(f) == 'l':
1563 1557 d = ctx2[f].data()
1564 1558 if len(d) >= 1024 or '\n' in d or util.binary(d):
1565 1559 self.ui.debug('ignoring suspect symlink placeholder'
1566 1560 ' "%s"\n' % f)
1567 1561 continue
1568 1562 sane.append(f)
1569 1563 modified = sane
1570 1564
1571 1565 r = modified, added, removed, deleted, unknown, ignored, clean
1572 1566
1573 1567 if listsubrepos:
1574 1568 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1575 1569 if working:
1576 1570 rev2 = None
1577 1571 else:
1578 1572 rev2 = ctx2.substate[subpath][1]
1579 1573 try:
1580 1574 submatch = matchmod.narrowmatcher(subpath, match)
1581 1575 s = sub.status(rev2, match=submatch, ignored=listignored,
1582 1576 clean=listclean, unknown=listunknown,
1583 1577 listsubrepos=True)
1584 1578 for rfiles, sfiles in zip(r, s):
1585 1579 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1586 1580 except error.LookupError:
1587 1581 self.ui.status(_("skipping missing subrepository: %s\n")
1588 1582 % subpath)
1589 1583
1590 1584 for l in r:
1591 1585 l.sort()
1592 1586 return r
1593 1587
1594 1588 def heads(self, start=None):
1595 1589 heads = self.changelog.heads(start)
1596 1590 # sort the output in rev descending order
1597 1591 return sorted(heads, key=self.changelog.rev, reverse=True)
1598 1592
1599 1593 def branchheads(self, branch=None, start=None, closed=False):
1600 1594 '''return a (possibly filtered) list of heads for the given branch
1601 1595
1602 1596 Heads are returned in topological order, from newest to oldest.
1603 1597 If branch is None, use the dirstate branch.
1604 1598 If start is not None, return only heads reachable from start.
1605 1599 If closed is True, return heads that are marked as closed as well.
1606 1600 '''
1607 1601 if branch is None:
1608 1602 branch = self[None].branch()
1609 1603 branches = self.branchmap()
1610 1604 if branch not in branches:
1611 1605 return []
1612 1606 # the cache returns heads ordered lowest to highest
1613 1607 bheads = list(reversed(branches[branch]))
1614 1608 if start is not None:
1615 1609 # filter out the heads that cannot be reached from startrev
1616 1610 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1617 1611 bheads = [h for h in bheads if h in fbheads]
1618 1612 if not closed:
1619 1613 bheads = [h for h in bheads if not self[h].closesbranch()]
1620 1614 return bheads
1621 1615
1622 1616 def branches(self, nodes):
1623 1617 if not nodes:
1624 1618 nodes = [self.changelog.tip()]
1625 1619 b = []
1626 1620 for n in nodes:
1627 1621 t = n
1628 1622 while True:
1629 1623 p = self.changelog.parents(n)
1630 1624 if p[1] != nullid or p[0] == nullid:
1631 1625 b.append((t, n, p[0], p[1]))
1632 1626 break
1633 1627 n = p[0]
1634 1628 return b
1635 1629
1636 1630 def between(self, pairs):
1637 1631 r = []
1638 1632
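# for each (top, bottom) pair we walk first parents from top toward
# bottom and collect the ancestors at exponentially growing distances
# (1, 2, 4, 8, ...), so only a logarithmic number of nodes is returned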
1639 1633 for top, bottom in pairs:
1640 1634 n, l, i = top, [], 0
1641 1635 f = 1
1642 1636
1643 1637 while n != bottom and n != nullid:
1644 1638 p = self.changelog.parents(n)[0]
1645 1639 if i == f:
1646 1640 l.append(n)
1647 1641 f = f * 2
1648 1642 n = p
1649 1643 i += 1
1650 1644
1651 1645 r.append(l)
1652 1646
1653 1647 return r
1654 1648
1655 1649 def pull(self, remote, heads=None, force=False):
1656 1650 # don't open a transaction for nothing or you break future useful
1657 1651 # rollback calls
1658 1652 tr = None
1659 1653 trname = 'pull\n' + util.hidepassword(remote.url())
1660 1654 lock = self.lock()
1661 1655 try:
1662 1656 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1663 1657 force=force)
1664 1658 common, fetch, rheads = tmp
1665 1659 if not fetch:
1666 1660 self.ui.status(_("no changes found\n"))
1667 1661 added = []
1668 1662 result = 0
1669 1663 else:
1670 1664 tr = self.transaction(trname)
1671 1665 if heads is None and list(common) == [nullid]:
1672 1666 self.ui.status(_("requesting all changes\n"))
1673 1667 elif heads is None and remote.capable('changegroupsubset'):
1674 1668 # issue1320, avoid a race if remote changed after discovery
1675 1669 heads = rheads
1676 1670
1677 1671 if remote.capable('getbundle'):
1678 1672 # TODO: get bundlecaps from remote
1679 1673 cg = remote.getbundle('pull', common=common,
1680 1674 heads=heads or rheads)
1681 1675 elif heads is None:
1682 1676 cg = remote.changegroup(fetch, 'pull')
1683 1677 elif not remote.capable('changegroupsubset'):
1684 1678 raise util.Abort(_("partial pull cannot be done because "
1685 1679 "other repository doesn't support "
1686 1680 "changegroupsubset."))
1687 1681 else:
1688 1682 cg = remote.changegroupsubset(fetch, heads, 'pull')
1689 1683 # we use the unfiltered changelog here because hidden revisions must
1690 1684 # be taken into account for phase synchronization. They may
1691 1685 # become public and visible again.
1692 1686 cl = self.unfiltered().changelog
1693 1687 clstart = len(cl)
1694 1688 result = self.addchangegroup(cg, 'pull', remote.url())
1695 1689 clend = len(cl)
1696 1690 added = [cl.node(r) for r in xrange(clstart, clend)]
1697 1691
1698 1692 # compute target subset
1699 1693 if heads is None:
1700 1694 # We pulled everything possible
1701 1695 # sync on everything common
1702 1696 subset = common + added
1703 1697 else:
1704 1698 # We pulled a specific subset
1705 1699 # sync on this subset
1706 1700 subset = heads
1707 1701
1708 1702 # Get phases data from the remote
1709 1703 remotephases = remote.listkeys('phases')
1710 1704 publishing = bool(remotephases.get('publishing', False))
1711 1705 if remotephases and not publishing:
1712 1706 # remote is new and non-publishing
1713 1707 pheads, _dr = phases.analyzeremotephases(self, subset,
1714 1708 remotephases)
1715 1709 phases.advanceboundary(self, phases.public, pheads)
1716 1710 phases.advanceboundary(self, phases.draft, subset)
1717 1711 else:
1718 1712 # Remote is old or publishing; all common changesets
1719 1713 # should be seen as public
1720 1714 phases.advanceboundary(self, phases.public, subset)
1721 1715
1722 1716 def gettransaction():
1723 1717 if tr is None:
1724 1718 return self.transaction(trname)
1725 1719 return tr
1726 1720
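# pass a transaction factory so obsolete.syncpull opens a
# transaction only if it actually needs one; if it returns a
# transaction, adopt it so it gets closed and released below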
1727 1721 obstr = obsolete.syncpull(self, remote, gettransaction)
1728 1722 if obstr is not None:
1729 1723 tr = obstr
1730 1724
1731 1725 if tr is not None:
1732 1726 tr.close()
1733 1727 finally:
1734 1728 if tr is not None:
1735 1729 tr.release()
1736 1730 lock.release()
1737 1731
1738 1732 return result
1739 1733
1740 1734 def checkpush(self, force, revs):
1741 1735 """Extensions can override this function if additional checks have
1742 1736 to be performed before pushing, or call it if they override push
1743 1737 command.
1744 1738 """
1745 1739 pass
1746 1740
1747 1741 def push(self, remote, force=False, revs=None, newbranch=False):
1748 1742 '''Push outgoing changesets (limited by revs) from the current
1749 1743 repository to remote. Return an integer:
1750 1744 - None means nothing to push
1751 1745 - 0 means HTTP error
1752 1746 - 1 means we pushed and remote head count is unchanged *or*
1753 1747 we have outgoing changesets but refused to push
1754 1748 - other values as described by addchangegroup()
1755 1749 '''
1756 1750 # there are two ways to push to remote repo:
1757 1751 #
1758 1752 # addchangegroup assumes local user can lock remote
1759 1753 # repo (local filesystem, old ssh servers).
1760 1754 #
1761 1755 # unbundle assumes local user cannot lock remote repo (new ssh
1762 1756 # servers, http servers).
1763 1757
1764 1758 if not remote.canpush():
1765 1759 raise util.Abort(_("destination does not support push"))
1766 1760 unfi = self.unfiltered()
1767 1761 def localphasemove(nodes, phase=phases.public):
1768 1762 """move <nodes> to <phase> in the local source repo"""
1769 1763 if locallock is not None:
1770 1764 phases.advanceboundary(self, phase, nodes)
1771 1765 else:
1772 1766 # repo is not locked, do not change any phases!
1773 1767 # Inform the user that phases should have been moved when
1774 1768 # applicable.
1775 1769 actualmoves = [n for n in nodes if phase < self[n].phase()]
1776 1770 phasestr = phases.phasenames[phase]
1777 1771 if actualmoves:
1778 1772 self.ui.status(_('cannot lock source repo, skipping local'
1779 1773 ' %s phase update\n') % phasestr)
1780 1774 # get local lock as we might write phase data
1781 1775 locallock = None
1782 1776 try:
1783 1777 locallock = self.lock()
1784 1778 except IOError, err:
1785 1779 if err.errno != errno.EACCES:
1786 1780 raise
1787 1781 # source repo cannot be locked.
1788 1782 # We do not abort the push, but just disable the local phase
1789 1783 # synchronisation.
1790 1784 msg = 'cannot lock source repository: %s\n' % err
1791 1785 self.ui.debug(msg)
1792 1786 try:
1793 1787 self.checkpush(force, revs)
1794 1788 lock = None
1795 1789 unbundle = remote.capable('unbundle')
1796 1790 if not unbundle:
1797 1791 lock = remote.lock()
1798 1792 try:
1799 1793 # discovery
1800 1794 fci = discovery.findcommonincoming
1801 1795 commoninc = fci(unfi, remote, force=force)
1802 1796 common, inc, remoteheads = commoninc
1803 1797 fco = discovery.findcommonoutgoing
1804 1798 outgoing = fco(unfi, remote, onlyheads=revs,
1805 1799 commoninc=commoninc, force=force)
1806 1800
1807 1801
1808 1802 if not outgoing.missing:
1809 1803 # nothing to push
1810 1804 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1811 1805 ret = None
1812 1806 else:
1813 1807 # something to push
1814 1808 if not force:
1815 1809 # if self.obsstore is empty --> no obsolete changesets,
1816 1810 # so we can skip the iteration
1817 1811 if unfi.obsstore:
1818 1812 # these messages are here for the 80-char limit reason
1819 1813 mso = _("push includes obsolete changeset: %s!")
1820 1814 mst = "push includes %s changeset: %s!"
1821 1815 # plain versions for i18n tool to detect them
1822 1816 _("push includes unstable changeset: %s!")
1823 1817 _("push includes bumped changeset: %s!")
1824 1818 _("push includes divergent changeset: %s!")
1825 1819 # If there is at least one obsolete or unstable
1826 1820 # changeset in missing, at least one of the
1827 1821 # missingheads will be obsolete or unstable.
1828 1822 # So checking heads only is ok
1829 1823 for node in outgoing.missingheads:
1830 1824 ctx = unfi[node]
1831 1825 if ctx.obsolete():
1832 1826 raise util.Abort(mso % ctx)
1833 1827 elif ctx.troubled():
1834 1828 raise util.Abort(_(mst)
1835 1829 % (ctx.troubles()[0],
1836 1830 ctx))
1837 1831 discovery.checkheads(unfi, remote, outgoing,
1838 1832 remoteheads, newbranch,
1839 1833 bool(inc))
1840 1834
1841 1835 # TODO: get bundlecaps from remote
1842 1836 bundlecaps = None
1843 1837 # create a changegroup from local
1844 1838 if revs is None and not outgoing.excluded:
1845 1839 # push everything,
1846 1840 # use the fast path, no race possible on push
1847 1841 bundler = changegroup.bundle10(self, bundlecaps)
1848 1842 cg = self._changegroupsubset(outgoing,
1849 1843 bundler,
1850 1844 'push',
1851 1845 fastpath=True)
1852 1846 else:
1853 1847 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1854 1848
1855 1849 # apply changegroup to remote
1856 1850 if unbundle:
1857 1851 # local repo finds heads on server, finds out what
1858 1852 # revs it must push. once revs transferred, if server
1859 1853 # finds it has different heads (someone else won
1860 1854 # commit/push race), server aborts.
1861 1855 if force:
1862 1856 remoteheads = ['force']
1863 1857 # ssh: return remote's addchangegroup()
1864 1858 # http: return remote's addchangegroup() or 0 for error
1865 1859 ret = remote.unbundle(cg, remoteheads, 'push')
1866 1860 else:
1867 1861 # we return an integer indicating remote head count
1868 1862 # change
1869 1863 ret = remote.addchangegroup(cg, 'push', self.url())
1870 1864
1871 1865 if ret:
1872 1866 # push succeeded, synchronize target of the push
1873 1867 cheads = outgoing.missingheads
1874 1868 elif revs is None:
1875 1869 # All-out push failed. synchronize all common
1876 1870 cheads = outgoing.commonheads
1877 1871 else:
1878 1872 # I want cheads = heads(::missingheads and ::commonheads)
1879 1873 # (missingheads is revs with secret changeset filtered out)
1880 1874 #
1881 1875 # This can be expressed as:
1882 1876 # cheads = ( (missingheads and ::commonheads)
1883 1877 #          + (commonheads and ::missingheads) )
1885 1879 #
1886 1880 # while trying to push we already computed the following:
1887 1881 # common = (::commonheads)
1888 1882 # missing = ((commonheads::missingheads) - commonheads)
1889 1883 #
1890 1884 # We can pick:
1891 1885 # * missingheads part of common (::commonheads)
1892 1886 common = set(outgoing.common)
1893 1887 cheads = [node for node in revs if node in common]
1894 1888 # and
1895 1889 # * commonheads that are parents of roots of missing
1896 1890 revset = unfi.set('%ln and parents(roots(%ln))',
1897 1891 outgoing.commonheads,
1898 1892 outgoing.missing)
1899 1893 cheads.extend(c.node() for c in revset)
1900 1894 # even when we don't push, exchanging phase data is useful
1901 1895 remotephases = remote.listkeys('phases')
1902 1896 if (self.ui.configbool('ui', '_usedassubrepo', False)
1903 1897 and remotephases # server supports phases
1904 1898 and ret is None # nothing was pushed
1905 1899 and remotephases.get('publishing', False)):
1906 1900 # When:
1907 1901 # - this is a subrepo push
1908 1902 # - and remote supports phases
1909 1903 # - and no changeset was pushed
1910 1904 # - and remote is publishing
1911 1905 # We may be in the issue 3871 case!
1912 1906 # We drop the phase synchronisation normally done as a
1913 1907 # courtesy to publish changesets that may still be draft
1914 1908 # on the remote.
1915 1909 remotephases = {'publishing': 'True'}
1916 1910 if not remotephases: # old server or public only repo
1917 1911 localphasemove(cheads)
1918 1912 # don't push any phase data as there is nothing to push
1919 1913 else:
1920 1914 ana = phases.analyzeremotephases(self, cheads, remotephases)
1921 1915 pheads, droots = ana
1922 1916 ### Apply remote phase on local
1923 1917 if remotephases.get('publishing', False):
1924 1918 localphasemove(cheads)
1925 1919 else: # publish = False
1926 1920 localphasemove(pheads)
1927 1921 localphasemove(cheads, phases.draft)
1928 1922 ### Apply local phase on remote
1929 1923
1930 1924 # Get the list of all revs draft on remote but public here.
1931 1925 # XXX Beware that the revset breaks if droots is not strictly
1932 1926 # XXX roots; we may want to ensure it is, but that is costly
1933 1927 outdated = unfi.set('heads((%ln::%ln) and public())',
1934 1928 droots, cheads)
1935 1929 for newremotehead in outdated:
1936 1930 r = remote.pushkey('phases',
1937 1931 newremotehead.hex(),
1938 1932 str(phases.draft),
1939 1933 str(phases.public))
1940 1934 if not r:
1941 1935 self.ui.warn(_('updating %s to public failed!\n')
1942 1936 % newremotehead)
1943 1937 self.ui.debug('try to push obsolete markers to remote\n')
1944 1938 obsolete.syncpush(self, remote)
1945 1939 finally:
1946 1940 if lock is not None:
1947 1941 lock.release()
1948 1942 finally:
1949 1943 if locallock is not None:
1950 1944 locallock.release()
1951 1945
1952 1946 self.ui.debug("checking for updated bookmarks\n")
1953 1947 rb = remote.listkeys('bookmarks')
1954 1948 revnums = map(unfi.changelog.rev, revs or [])
1955 1949 ancestors = [
1956 1950 a for a in unfi.changelog.ancestors(revnums, inclusive=True)]
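# for each bookmark also known on the remote, advance it there to
# the local position when that is a valid destination for the move
# and (when specific revs were pushed) an ancestor of what was pushed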
1957 1951 for k in rb.keys():
1958 1952 if k in unfi._bookmarks:
1959 1953 nr, nl = rb[k], hex(self._bookmarks[k])
1960 1954 if nr in unfi:
1961 1955 cr = unfi[nr]
1962 1956 cl = unfi[nl]
1963 1957 if bookmarks.validdest(unfi, cr, cl):
1964 1958 if ancestors and cl.rev() not in ancestors:
1965 1959 continue
1966 1960 r = remote.pushkey('bookmarks', k, nr, nl)
1967 1961 if r:
1968 1962 self.ui.status(_("updating bookmark %s\n") % k)
1969 1963 else:
1970 1964 self.ui.warn(_('updating bookmark %s'
1971 1965 ' failed!\n') % k)
1972 1966
1973 1967 return ret
1974 1968
1975 1969 def changegroupinfo(self, nodes, source):
1976 1970 if self.ui.verbose or source == 'bundle':
1977 1971 self.ui.status(_("%d changesets found\n") % len(nodes))
1978 1972 if self.ui.debugflag:
1979 1973 self.ui.debug("list of changesets:\n")
1980 1974 for node in nodes:
1981 1975 self.ui.debug("%s\n" % hex(node))
1982 1976
1983 1977 def changegroupsubset(self, bases, heads, source):
1984 1978 """Compute a changegroup consisting of all the nodes that are
1985 1979 descendants of any of the bases and ancestors of any of the heads.
1986 1980 Return a chunkbuffer object whose read() method will return
1987 1981 successive changegroup chunks.
1988 1982
1989 1983 It is fairly complex as determining which filenodes and which
1990 1984 manifest nodes need to be included for the changeset to be complete
1991 1985 is non-trivial.
1992 1986
1993 1987 Another wrinkle is doing the reverse, figuring out which changeset in
1994 1988 the changegroup a particular filenode or manifestnode belongs to.
1995 1989 """
1996 1990 cl = self.changelog
1997 1991 if not bases:
1998 1992 bases = [nullid]
1999 1993 # TODO: remove call to nodesbetween.
2000 1994 csets, bases, heads = cl.nodesbetween(bases, heads)
2001 1995 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
2002 1996 outgoing = discovery.outgoing(cl, bases, heads)
2003 1997 bundler = changegroup.bundle10(self)
2004 1998 return self._changegroupsubset(outgoing, bundler, source)
2005 1999
2006 2000 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2007 2001 """Like getbundle, but taking a discovery.outgoing as an argument.
2008 2002
2009 2003 This is only implemented for local repos and reuses potentially
2010 2004 precomputed sets in outgoing."""
2011 2005 if not outgoing.missing:
2012 2006 return None
2013 2007 bundler = changegroup.bundle10(self, bundlecaps)
2014 2008 return self._changegroupsubset(outgoing, bundler, source)
2015 2009
2016 2010 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2017 2011 """Like changegroupsubset, but returns the set difference between the
2018 2012 ancestors of heads and the ancestors common.
2019 2013
2020 2014 If heads is None, use the local heads. If common is None, use [nullid].
2021 2015
2022 2016 The nodes in common might not all be known locally due to the way the
2023 2017 current discovery protocol works.
2024 2018 """
2025 2019 cl = self.changelog
2026 2020 if common:
2027 2021 hasnode = cl.hasnode
2028 2022 common = [n for n in common if hasnode(n)]
2029 2023 else:
2030 2024 common = [nullid]
2031 2025 if not heads:
2032 2026 heads = cl.heads()
2033 2027 return self.getlocalbundle(source,
2034 2028 discovery.outgoing(cl, common, heads),
2035 2029 bundlecaps=bundlecaps)
2036 2030
2037 2031 @unfilteredmethod
2038 2032 def _changegroupsubset(self, outgoing, bundler, source,
2039 2033 fastpath=False):
2040 2034 commonrevs = outgoing.common
2041 2035 csets = outgoing.missing
2042 2036 heads = outgoing.missingheads
2043 2037 # We go through the fast path if we get told to, or if all (unfiltered)
2044 2038 # heads have been requested (since we then know that all linkrevs will
2045 2039 # be pulled by the client).
2046 2040 heads.sort()
2047 2041 fastpathlinkrev = fastpath or (
2048 2042 self.filtername is None and heads == sorted(self.heads()))
2049 2043
2050 2044 self.hook('preoutgoing', throw=True, source=source)
2051 2045 self.changegroupinfo(csets, source)
2052 2046 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2053 2047 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2054 2048
2055 2049 def changegroup(self, basenodes, source):
2056 2050 # to avoid a race we use changegroupsubset() (issue1320)
2057 2051 return self.changegroupsubset(basenodes, self.heads(), source)
2058 2052
2059 2053 @unfilteredmethod
2060 2054 def addchangegroup(self, source, srctype, url, emptyok=False):
2061 2055 """Add the changegroup returned by source.read() to this repo.
2062 2056 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2063 2057 the URL of the repo where this changegroup is coming from.
2064 2058
2065 2059 Return an integer summarizing the change to this repo:
2066 2060 - nothing changed or no source: 0
2067 2061 - more heads than before: 1+added heads (2..n)
2068 2062 - fewer heads than before: -1-removed heads (-2..-n)
2069 2063 - number of heads stays the same: 1
2070 2064 """
2071 2065 def csmap(x):
2072 2066 self.ui.debug("add changeset %s\n" % short(x))
2073 2067 return len(cl)
2074 2068
2075 2069 def revmap(x):
2076 2070 return cl.rev(x)
2077 2071
2078 2072 if not source:
2079 2073 return 0
2080 2074
2081 2075 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2082 2076
2083 2077 changesets = files = revisions = 0
2084 2078 efiles = set()
2085 2079
2086 2080 # write changelog data to temp files so concurrent readers will not see
2087 2081 # an inconsistent view
2088 2082 cl = self.changelog
2089 2083 cl.delayupdate()
2090 2084 oldheads = cl.heads()
2091 2085
2092 2086 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2093 2087 try:
2094 2088 trp = weakref.proxy(tr)
2095 2089 # pull off the changeset group
2096 2090 self.ui.status(_("adding changesets\n"))
2097 2091 clstart = len(cl)
2098 2092 class prog(object):
2099 2093 step = _('changesets')
2100 2094 count = 1
2101 2095 ui = self.ui
2102 2096 total = None
2103 2097 def __call__(self):
2104 2098 self.ui.progress(self.step, self.count, unit=_('chunks'),
2105 2099 total=self.total)
2106 2100 self.count += 1
2107 2101 pr = prog()
2108 2102 source.callback = pr
2109 2103
2110 2104 source.changelogheader()
2111 2105 srccontent = cl.addgroup(source, csmap, trp)
2112 2106 if not (srccontent or emptyok):
2113 2107 raise util.Abort(_("received changelog group is empty"))
2114 2108 clend = len(cl)
2115 2109 changesets = clend - clstart
2116 2110 for c in xrange(clstart, clend):
2117 2111 efiles.update(self[c].files())
2118 2112 efiles = len(efiles)
2119 2113 self.ui.progress(_('changesets'), None)
2120 2114
2121 2115 # pull off the manifest group
2122 2116 self.ui.status(_("adding manifests\n"))
2123 2117 pr.step = _('manifests')
2124 2118 pr.count = 1
2125 2119 pr.total = changesets # manifests <= changesets
2126 2120 # no need to check for empty manifest group here:
2127 2121 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2128 2122 # no new manifest will be created and the manifest group will
2129 2123 # be empty during the pull
2130 2124 source.manifestheader()
2131 2125 self.manifest.addgroup(source, revmap, trp)
2132 2126 self.ui.progress(_('manifests'), None)
2133 2127
2134 2128 needfiles = {}
2135 2129 if self.ui.configbool('server', 'validate', default=False):
2136 2130 # validate incoming csets have their manifests
2137 2131 for cset in xrange(clstart, clend):
2138 2132 mfest = self.changelog.read(self.changelog.node(cset))[0]
2139 2133 mfest = self.manifest.readdelta(mfest)
2140 2134 # store file nodes we must see
2141 2135 for f, n in mfest.iteritems():
2142 2136 needfiles.setdefault(f, set()).add(n)
2143 2137
2144 2138 # process the files
2145 2139 self.ui.status(_("adding file changes\n"))
2146 2140 pr.step = _('files')
2147 2141 pr.count = 1
2148 2142 pr.total = efiles
2149 2143 source.callback = None
2150 2144
2151 2145 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2152 2146 pr, needfiles)
2153 2147 revisions += newrevs
2154 2148 files += newfiles
2155 2149
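# dh is the net change in head count, not counting new heads that
# close their branch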
2156 2150 dh = 0
2157 2151 if oldheads:
2158 2152 heads = cl.heads()
2159 2153 dh = len(heads) - len(oldheads)
2160 2154 for h in heads:
2161 2155 if h not in oldheads and self[h].closesbranch():
2162 2156 dh -= 1
2163 2157 htext = ""
2164 2158 if dh:
2165 2159 htext = _(" (%+d heads)") % dh
2166 2160
2167 2161 self.ui.status(_("added %d changesets"
2168 2162 " with %d changes to %d files%s\n")
2169 2163 % (changesets, revisions, files, htext))
2170 2164 self.invalidatevolatilesets()
2171 2165
2172 2166 if changesets > 0:
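# cl.writepending() flushes the not-yet-committed changelog data to
# a pending file; passing the repo root as 'pending' lets the
# pretxnchangegroup hook see the incoming changesets before the
# transaction is closed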
2173 2167 p = lambda: cl.writepending() and self.root or ""
2174 2168 self.hook('pretxnchangegroup', throw=True,
2175 2169 node=hex(cl.node(clstart)), source=srctype,
2176 2170 url=url, pending=p)
2177 2171
2178 2172 added = [cl.node(r) for r in xrange(clstart, clend)]
2179 2173 publishing = self.ui.configbool('phases', 'publish', True)
2180 2174 if srctype == 'push':
2181 2175 # Old servers can not push the boundary themselves.
2182 2176 # New servers won't push the boundary if the changeset already
2183 2177 # existed locally as secret
2184 2178 #
2185 2179 # We should not use added here but the list of all changes in
2186 2180 # the bundle
2187 2181 if publishing:
2188 2182 phases.advanceboundary(self, phases.public, srccontent)
2189 2183 else:
2190 2184 phases.advanceboundary(self, phases.draft, srccontent)
2191 2185 phases.retractboundary(self, phases.draft, added)
2192 2186 elif srctype != 'strip':
2193 2187 # publishing only alters behavior during push
2194 2188 #
2195 2189 # strip should not touch boundary at all
2196 2190 phases.retractboundary(self, phases.draft, added)
2197 2191
2198 2192 # make changelog see real files again
2199 2193 cl.finalize(trp)
2200 2194
2201 2195 tr.close()
2202 2196
2203 2197 if changesets > 0:
2204 2198 if srctype != 'strip':
2205 2199 # During strip, the branchcache is invalid but the coming call to
2206 2200 # `destroyed` will repair it.
2207 2201 # In other cases we can safely update the cache on disk.
2208 2202 branchmap.updatecache(self.filtered('served'))
2209 2203 def runhooks():
2210 2204 # forcefully update the on-disk branch cache
2211 2205 self.ui.debug("updating the branch cache\n")
2212 2206 self.hook("changegroup", node=hex(cl.node(clstart)),
2213 2207 source=srctype, url=url)
2214 2208
2215 2209 for n in added:
2216 2210 self.hook("incoming", node=hex(n), source=srctype,
2217 2211 url=url)
2218 2212
2219 2213 newheads = [h for h in self.heads() if h not in oldheads]
2220 2214 self.ui.log("incoming",
2221 2215 "%s incoming changes - new heads: %s\n",
2222 2216 len(added),
2223 2217 ', '.join([hex(c[:6]) for c in newheads]))
2224 2218 self._afterlock(runhooks)
2225 2219
2226 2220 finally:
2227 2221 tr.release()
2228 2222 # never return 0 here:
2229 2223 if dh < 0:
2230 2224 return dh - 1
2231 2225 else:
2232 2226 return dh + 1
2233 2227
2234 2228 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2235 2229 revisions = 0
2236 2230 files = 0
2237 2231 while True:
2238 2232 chunkdata = source.filelogheader()
2239 2233 if not chunkdata:
2240 2234 break
2241 2235 f = chunkdata["filename"]
2242 2236 self.ui.debug("adding %s revisions\n" % f)
2243 2237 pr()
2244 2238 fl = self.file(f)
2245 2239 o = len(fl)
2246 2240 if not fl.addgroup(source, revmap, trp):
2247 2241 raise util.Abort(_("received file revlog group is empty"))
2248 2242 revisions += len(fl) - o
2249 2243 files += 1
2250 2244 if f in needfiles:
2251 2245 needs = needfiles[f]
2252 2246 for new in xrange(o, len(fl)):
2253 2247 n = fl.node(new)
2254 2248 if n in needs:
2255 2249 needs.remove(n)
2256 2250 else:
2257 2251 raise util.Abort(
2258 2252 _("received spurious file revlog entry"))
2259 2253 if not needs:
2260 2254 del needfiles[f]
2261 2255 self.ui.progress(_('files'), None)
2262 2256
2263 2257 for f, needs in needfiles.iteritems():
2264 2258 fl = self.file(f)
2265 2259 for n in needs:
2266 2260 try:
2267 2261 fl.rev(n)
2268 2262 except error.LookupError:
2269 2263 raise util.Abort(
2270 2264 _('missing file data for %s:%s - run hg verify') %
2271 2265 (f, hex(n)))
2272 2266
2273 2267 return revisions, files
2274 2268
2275 2269 def stream_in(self, remote, requirements):
2276 2270 lock = self.lock()
2277 2271 try:
2278 2272 # Save remote branchmap. We will use it later
2279 2273 # to speed up branchcache creation
2280 2274 rbranchmap = None
2281 2275 if remote.capable("branchmap"):
2282 2276 rbranchmap = remote.branchmap()
2283 2277
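# stream_out wire format: one status line (an integer result code),
# then a line with '<file count> <total bytes>', then for each file
# a '<name>\0<size>' header followed by <size> bytes of raw store data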
2284 2278 fp = remote.stream_out()
2285 2279 l = fp.readline()
2286 2280 try:
2287 2281 resp = int(l)
2288 2282 except ValueError:
2289 2283 raise error.ResponseError(
2290 2284 _('unexpected response from remote server:'), l)
2291 2285 if resp == 1:
2292 2286 raise util.Abort(_('operation forbidden by server'))
2293 2287 elif resp == 2:
2294 2288 raise util.Abort(_('locking the remote repository failed'))
2295 2289 elif resp != 0:
2296 2290 raise util.Abort(_('the server sent an unknown error code'))
2297 2291 self.ui.status(_('streaming all changes\n'))
2298 2292 l = fp.readline()
2299 2293 try:
2300 2294 total_files, total_bytes = map(int, l.split(' ', 1))
2301 2295 except (ValueError, TypeError):
2302 2296 raise error.ResponseError(
2303 2297 _('unexpected response from remote server:'), l)
2304 2298 self.ui.status(_('%d files to transfer, %s of data\n') %
2305 2299 (total_files, util.bytecount(total_bytes)))
2306 2300 handled_bytes = 0
2307 2301 self.ui.progress(_('clone'), 0, total=total_bytes)
2308 2302 start = time.time()
2309 2303 for i in xrange(total_files):
2310 2304 # XXX doesn't support '\n' or '\r' in filenames
2311 2305 l = fp.readline()
2312 2306 try:
2313 2307 name, size = l.split('\0', 1)
2314 2308 size = int(size)
2315 2309 except (ValueError, TypeError):
2316 2310 raise error.ResponseError(
2317 2311 _('unexpected response from remote server:'), l)
2318 2312 if self.ui.debugflag:
2319 2313 self.ui.debug('adding %s (%s)\n' %
2320 2314 (name, util.bytecount(size)))
2321 2315 # for backwards compat, name was partially encoded
2322 2316 ofp = self.sopener(store.decodedir(name), 'w')
2323 2317 for chunk in util.filechunkiter(fp, limit=size):
2324 2318 handled_bytes += len(chunk)
2325 2319 self.ui.progress(_('clone'), handled_bytes,
2326 2320 total=total_bytes)
2327 2321 ofp.write(chunk)
2328 2322 ofp.close()
2329 2323 elapsed = time.time() - start
2330 2324 if elapsed <= 0:
2331 2325 elapsed = 0.001
2332 2326 self.ui.progress(_('clone'), None)
2333 2327 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2334 2328 (util.bytecount(total_bytes), elapsed,
2335 2329 util.bytecount(total_bytes / elapsed)))
2336 2330
2337 2331 # new requirements = old non-format requirements +
2338 2332 #                    new format-related requirements from the
2339 2333 #                    streamed-in repository
2340 2334 requirements.update(set(self.requirements) - self.supportedformats)
2341 2335 self._applyrequirements(requirements)
2342 2336 self._writerequirements()
2343 2337
2344 2338 if rbranchmap:
2345 2339 rbheads = []
2346 2340 for bheads in rbranchmap.itervalues():
2347 2341 rbheads.extend(bheads)
2348 2342
2349 2343 if rbheads:
2350 2344 rtiprev = max((int(self.changelog.rev(node))
2351 2345 for node in rbheads))
2352 2346 cache = branchmap.branchcache(rbranchmap,
2353 2347 self[rtiprev].node(),
2354 2348 rtiprev)
2355 2349 # Try to stick it as low as possible
2356 2350 # filters above served are unlikely to be fetched from a clone
2357 2351 for candidate in ('base', 'immutable', 'served'):
2358 2352 rview = self.filtered(candidate)
2359 2353 if cache.validfor(rview):
2360 2354 self._branchcaches[candidate] = cache
2361 2355 cache.write(rview)
2362 2356 break
2363 2357 self.invalidate()
2364 2358 return len(self.heads()) + 1
2365 2359 finally:
2366 2360 lock.release()
2367 2361
2368 2362 def clone(self, remote, heads=[], stream=False):
2369 2363 '''clone remote repository.
2370 2364
2371 2365 keyword arguments:
2372 2366 heads: list of revs to clone (forces use of pull)
2373 2367 stream: use streaming clone if possible'''
2374 2368
2375 2369 # now, all clients that can request uncompressed clones can
2376 2370 # read repo formats supported by all servers that can serve
2377 2371 # them.
2378 2372
2379 2373 # if revlog format changes, client will have to check version
2380 2374 # and format flags on "stream" capability, and use
2381 2375 # uncompressed only if compatible.
2382 2376
2383 2377 if not stream:
2384 2378 # if the server explicitly prefers to stream (for fast LANs)
2385 2379 stream = remote.capable('stream-preferred')
2386 2380
2387 2381 if stream and not heads:
2388 2382 # 'stream' means remote revlog format is revlogv1 only
2389 2383 if remote.capable('stream'):
2390 2384 return self.stream_in(remote, set(('revlogv1',)))
2391 2385 # otherwise, 'streamreqs' contains the remote revlog format
2392 2386 streamreqs = remote.capable('streamreqs')
2393 2387 if streamreqs:
2394 2388 streamreqs = set(streamreqs.split(','))
2395 2389 # if we support it, stream in and adjust our requirements
2396 2390 if not streamreqs - self.supportedformats:
2397 2391 return self.stream_in(remote, streamreqs)
2398 2392 return self.pull(remote, heads)
2399 2393
2400 2394 def pushkey(self, namespace, key, old, new):
2401 2395 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2402 2396 old=old, new=new)
2403 2397 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2404 2398 ret = pushkey.push(self, namespace, key, old, new)
2405 2399 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2406 2400 ret=ret)
2407 2401 return ret
2408 2402
2409 2403 def listkeys(self, namespace):
2410 2404 self.hook('prelistkeys', throw=True, namespace=namespace)
2411 2405 self.ui.debug('listing keys for "%s"\n' % namespace)
2412 2406 values = pushkey.list(self, namespace)
2413 2407 self.hook('listkeys', namespace=namespace, values=values)
2414 2408 return values
2415 2409
2416 2410 def debugwireargs(self, one, two, three=None, four=None, five=None):
2417 2411 '''used to test argument passing over the wire'''
2418 2412 return "%s %s %s %s %s" % (one, two, three, four, five)
2419 2413
2420 2414 def savecommitmessage(self, text):
2421 2415 fp = self.opener('last-message.txt', 'wb')
2422 2416 try:
2423 2417 fp.write(text)
2424 2418 finally:
2425 2419 fp.close()
2426 2420 return self.pathto(fp.name[len(self.root) + 1:])
2427 2421
2428 2422 # used to avoid circular references so destructors work
2429 2423 def aftertrans(files):
2430 2424 renamefiles = [tuple(t) for t in files]
2431 2425 def a():
2432 2426 for vfs, src, dest in renamefiles:
2433 2427 try:
2434 2428 vfs.rename(src, dest)
2435 2429 except OSError: # journal file does not yet exist
2436 2430 pass
2437 2431 return a
2438 2432
2439 2433 def undoname(fn):
2440 2434 base, name = os.path.split(fn)
2441 2435 assert name.startswith('journal')
2442 2436 return os.path.join(base, name.replace('journal', 'undo', 1))
2443 2437
2444 2438 def instance(ui, path, create):
2445 2439 return localrepository(ui, util.urllocalpath(path), create)
2446 2440
2447 2441 def islocal(path):
2448 2442 return True