changegroup: move changegroup file adding to a separate function...
Durham Goode
r19291:93635f69 default
@@ -1,2433 +1,2443 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class repofilecache(filecache):
23 23 """All filecache usage on repo are done for logic that should be unfiltered
24 24 """
25 25
26 26 def __get__(self, repo, type=None):
27 27 return super(repofilecache, self).__get__(repo.unfiltered(), type)
28 28 def __set__(self, repo, value):
29 29 return super(repofilecache, self).__set__(repo.unfiltered(), value)
30 30 def __delete__(self, repo):
31 31 return super(repofilecache, self).__delete__(repo.unfiltered())
32 32
33 33 class storecache(repofilecache):
34 34 """filecache for files in the store"""
35 35 def join(self, obj, fname):
36 36 return obj.sjoin(fname)
37 37
38 38 class unfilteredpropertycache(propertycache):
39 39 """propertycache that apply to unfiltered repo only"""
40 40
41 41 def __get__(self, repo, type=None):
42 42 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
44 44 class filteredpropertycache(propertycache):
45 45 """propertycache that must take filtering in account"""
46 46
47 47 def cachevalue(self, obj, value):
48 48 object.__setattr__(obj, self.name, value)
49 49
50 50
51 51 def hasunfilteredcache(repo, name):
52 52 """check if a repo has an unfilteredpropertycache value for <name>"""
53 53 return name in vars(repo.unfiltered())
54 54
55 55 def unfilteredmethod(orig):
56 56 """decorate method that always need to be run on unfiltered version"""
57 57 def wrapper(repo, *args, **kwargs):
58 58 return orig(repo.unfiltered(), *args, **kwargs)
59 59 return wrapper
60 60
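# Example (illustrative sketch; 'stripnodes' is a hypothetical method name):
# the decorator above is typically applied like this, so the wrapped method
# always sees the unfiltered repo:
#
#   @unfilteredmethod
#   def stripnodes(repo, nodes):
#       return len(repo)  # 'repo' here is repo.unfiltered()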
61 61 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
62 62 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
64 64 class localpeer(peer.peerrepository):
65 65 '''peer for a local repo; reflects only the most recent API'''
66 66
67 67 def __init__(self, repo, caps=MODERNCAPS):
68 68 peer.peerrepository.__init__(self)
69 69 self._repo = repo.filtered('served')
70 70 self.ui = repo.ui
71 71 self._caps = repo._restrictcapabilities(caps)
72 72 self.requirements = repo.requirements
73 73 self.supportedformats = repo.supportedformats
74 74
75 75 def close(self):
76 76 self._repo.close()
77 77
78 78 def _capabilities(self):
79 79 return self._caps
80 80
81 81 def local(self):
82 82 return self._repo
83 83
84 84 def canpush(self):
85 85 return True
86 86
87 87 def url(self):
88 88 return self._repo.url()
89 89
90 90 def lookup(self, key):
91 91 return self._repo.lookup(key)
92 92
93 93 def branchmap(self):
94 94 return self._repo.branchmap()
95 95
96 96 def heads(self):
97 97 return self._repo.heads()
98 98
99 99 def known(self, nodes):
100 100 return self._repo.known(nodes)
101 101
102 102 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
103 103 return self._repo.getbundle(source, heads=heads, common=common,
104 104 bundlecaps=bundlecaps)
105 105
106 106 # TODO We might want to move the next two calls into legacypeer and add
107 107 # unbundle instead.
108 108
109 109 def lock(self):
110 110 return self._repo.lock()
111 111
112 112 def addchangegroup(self, cg, source, url):
113 113 return self._repo.addchangegroup(cg, source, url)
114 114
115 115 def pushkey(self, namespace, key, old, new):
116 116 return self._repo.pushkey(namespace, key, old, new)
117 117
118 118 def listkeys(self, namespace):
119 119 return self._repo.listkeys(namespace)
120 120
121 121 def debugwireargs(self, one, two, three=None, four=None, five=None):
122 122 '''used to test argument passing over the wire'''
123 123 return "%s %s %s %s %s" % (one, two, three, four, five)
124 124
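# Example (illustrative sketch): a localpeer is normally obtained through
# localrepository.peer() rather than constructed directly:
#
#   peer = repo.peer()
#   if peer.local() is not None:  # local peers expose the repo directly
#       heads = peer.heads()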
125 125 class locallegacypeer(localpeer):
126 126 '''peer extension which implements legacy methods too; used for tests with
127 127 restricted capabilities'''
128 128
129 129 def __init__(self, repo):
130 130 localpeer.__init__(self, repo, caps=LEGACYCAPS)
131 131
132 132 def branches(self, nodes):
133 133 return self._repo.branches(nodes)
134 134
135 135 def between(self, pairs):
136 136 return self._repo.between(pairs)
137 137
138 138 def changegroup(self, basenodes, source):
139 139 return self._repo.changegroup(basenodes, source)
140 140
141 141 def changegroupsubset(self, bases, heads, source):
142 142 return self._repo.changegroupsubset(bases, heads, source)
143 143
144 144 class localrepository(object):
145 145
146 146 supportedformats = set(('revlogv1', 'generaldelta'))
147 147 supported = supportedformats | set(('store', 'fncache', 'shared',
148 148 'dotencode'))
149 149 openerreqs = set(('revlogv1', 'generaldelta'))
150 150 requirements = ['revlogv1']
151 151 filtername = None
152 152
153 153 def _baserequirements(self, create):
154 154 return self.requirements[:]
155 155
156 156 def __init__(self, baseui, path=None, create=False):
157 157 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
158 158 self.wopener = self.wvfs
159 159 self.root = self.wvfs.base
160 160 self.path = self.wvfs.join(".hg")
161 161 self.origroot = path
162 162 self.auditor = scmutil.pathauditor(self.root, self._checknested)
163 163 self.vfs = scmutil.vfs(self.path)
164 164 self.opener = self.vfs
165 165 self.baseui = baseui
166 166 self.ui = baseui.copy()
167 167 # A list of callbacks to shape the phase if no data were found.
168 168 # Callbacks are in the form: func(repo, roots) --> processed root.
169 169 # This list is to be filled by extensions during repo setup.
170 170 self._phasedefaults = []
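# Example (illustrative sketch; 'myphasedefault' is a hypothetical name):
# an extension could register a callback of the documented form during
# repo setup:
#
#   def myphasedefault(repo, roots):
#       return roots  # inspect or adjust the phase roots here
#   repo._phasedefaults.append(myphasedefault)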
171 171 try:
172 172 self.ui.readconfig(self.join("hgrc"), self.root)
173 173 extensions.loadall(self.ui)
174 174 except IOError:
175 175 pass
176 176
177 177 if not self.vfs.isdir():
178 178 if create:
179 179 if not self.wvfs.exists():
180 180 self.wvfs.makedirs()
181 181 self.vfs.makedir(notindexed=True)
182 182 requirements = self._baserequirements(create)
183 183 if self.ui.configbool('format', 'usestore', True):
184 184 self.vfs.mkdir("store")
185 185 requirements.append("store")
186 186 if self.ui.configbool('format', 'usefncache', True):
187 187 requirements.append("fncache")
188 188 if self.ui.configbool('format', 'dotencode', True):
189 189 requirements.append('dotencode')
190 190 # create an invalid changelog
191 191 self.vfs.append(
192 192 "00changelog.i",
193 193 '\0\0\0\2' # represents revlogv2
194 194 ' dummy changelog to prevent using the old repo layout'
195 195 )
196 196 if self.ui.configbool('format', 'generaldelta', False):
197 197 requirements.append("generaldelta")
198 198 requirements = set(requirements)
199 199 else:
200 200 raise error.RepoError(_("repository %s not found") % path)
201 201 elif create:
202 202 raise error.RepoError(_("repository %s already exists") % path)
203 203 else:
204 204 try:
205 205 requirements = scmutil.readrequires(self.vfs, self.supported)
206 206 except IOError, inst:
207 207 if inst.errno != errno.ENOENT:
208 208 raise
209 209 requirements = set()
210 210
211 211 self.sharedpath = self.path
212 212 try:
213 213 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
214 214 realpath=True)
215 215 s = vfs.base
216 216 if not vfs.exists():
217 217 raise error.RepoError(
218 218 _('.hg/sharedpath points to nonexistent directory %s') % s)
219 219 self.sharedpath = s
220 220 except IOError, inst:
221 221 if inst.errno != errno.ENOENT:
222 222 raise
223 223
224 224 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
225 225 self.spath = self.store.path
226 226 self.svfs = self.store.vfs
227 227 self.sopener = self.svfs
228 228 self.sjoin = self.store.join
229 229 self.vfs.createmode = self.store.createmode
230 230 self._applyrequirements(requirements)
231 231 if create:
232 232 self._writerequirements()
233 233
234 234
235 235 self._branchcaches = {}
236 236 self.filterpats = {}
237 237 self._datafilters = {}
238 238 self._transref = self._lockref = self._wlockref = None
239 239
240 240 # A cache for various files under .hg/ that tracks file changes,
241 241 # (used by the filecache decorator)
242 242 #
243 243 # Maps a property name to its util.filecacheentry
244 244 self._filecache = {}
245 245
246 246 # hold sets of revision to be filtered
247 247 # should be cleared when something might have changed the filter value:
248 248 # - new changesets,
249 249 # - phase change,
250 250 # - new obsolescence marker,
251 251 # - working directory parent change,
252 252 # - bookmark changes
253 253 self.filteredrevcache = {}
254 254
255 255 def close(self):
256 256 pass
257 257
258 258 def _restrictcapabilities(self, caps):
259 259 return caps
260 260
261 261 def _applyrequirements(self, requirements):
262 262 self.requirements = requirements
263 263 self.sopener.options = dict((r, 1) for r in requirements
264 264 if r in self.openerreqs)
265 265
266 266 def _writerequirements(self):
267 267 reqfile = self.opener("requires", "w")
268 268 for r in sorted(self.requirements):
269 269 reqfile.write("%s\n" % r)
270 270 reqfile.close()
271 271
272 272 def _checknested(self, path):
273 273 """Determine if path is a legal nested repository."""
274 274 if not path.startswith(self.root):
275 275 return False
276 276 subpath = path[len(self.root) + 1:]
277 277 normsubpath = util.pconvert(subpath)
278 278
279 279 # XXX: Checking against the current working copy is wrong in
280 280 # the sense that it can reject things like
281 281 #
282 282 # $ hg cat -r 10 sub/x.txt
283 283 #
284 284 # if sub/ is no longer a subrepository in the working copy
285 285 # parent revision.
286 286 #
287 287 # However, it can of course also allow things that would have
288 288 # been rejected before, such as the above cat command if sub/
289 289 # is a subrepository now, but was a normal directory before.
290 290 # The old path auditor would have rejected by mistake since it
291 291 # panics when it sees sub/.hg/.
292 292 #
293 293 # All in all, checking against the working copy seems sensible
294 294 # since we want to prevent access to nested repositories on
295 295 # the filesystem *now*.
296 296 ctx = self[None]
297 297 parts = util.splitpath(subpath)
298 298 while parts:
299 299 prefix = '/'.join(parts)
300 300 if prefix in ctx.substate:
301 301 if prefix == normsubpath:
302 302 return True
303 303 else:
304 304 sub = ctx.sub(prefix)
305 305 return sub.checknested(subpath[len(prefix) + 1:])
306 306 else:
307 307 parts.pop()
308 308 return False
309 309
310 310 def peer(self):
311 311 return localpeer(self) # not cached to avoid reference cycle
312 312
313 313 def unfiltered(self):
314 314 """Return unfiltered version of the repository
315 315
316 316 Intended to be overwritten by filtered repo."""
317 317 return self
318 318
319 319 def filtered(self, name):
320 320 """Return a filtered version of a repository"""
321 321 # build a new class with the mixin and the current class
322 322 # (possibly subclass of the repo)
323 323 class proxycls(repoview.repoview, self.unfiltered().__class__):
324 324 pass
325 325 return proxycls(self, name)
326 326
327 327 @repofilecache('bookmarks')
328 328 def _bookmarks(self):
329 329 return bookmarks.bmstore(self)
330 330
331 331 @repofilecache('bookmarks.current')
332 332 def _bookmarkcurrent(self):
333 333 return bookmarks.readcurrent(self)
334 334
335 335 def bookmarkheads(self, bookmark):
336 336 name = bookmark.split('@', 1)[0]
337 337 heads = []
338 338 for mark, n in self._bookmarks.iteritems():
339 339 if mark.split('@', 1)[0] == name:
340 340 heads.append(n)
341 341 return heads
342 342
343 343 @storecache('phaseroots')
344 344 def _phasecache(self):
345 345 return phases.phasecache(self, self._phasedefaults)
346 346
347 347 @storecache('obsstore')
348 348 def obsstore(self):
349 349 store = obsolete.obsstore(self.sopener)
350 350 if store and not obsolete._enabled:
351 351 # message is rare enough to not be translated
352 352 msg = 'obsolete feature not enabled but %i markers found!\n'
353 353 self.ui.warn(msg % len(list(store)))
354 354 return store
355 355
356 356 @storecache('00changelog.i')
357 357 def changelog(self):
358 358 c = changelog.changelog(self.sopener)
359 359 if 'HG_PENDING' in os.environ:
360 360 p = os.environ['HG_PENDING']
361 361 if p.startswith(self.root):
362 362 c.readpending('00changelog.i.a')
363 363 return c
364 364
365 365 @storecache('00manifest.i')
366 366 def manifest(self):
367 367 return manifest.manifest(self.sopener)
368 368
369 369 @repofilecache('dirstate')
370 370 def dirstate(self):
371 371 warned = [0]
372 372 def validate(node):
373 373 try:
374 374 self.changelog.rev(node)
375 375 return node
376 376 except error.LookupError:
377 377 if not warned[0]:
378 378 warned[0] = True
379 379 self.ui.warn(_("warning: ignoring unknown"
380 380 " working parent %s!\n") % short(node))
381 381 return nullid
382 382
383 383 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
384 384
385 385 def __getitem__(self, changeid):
386 386 if changeid is None:
387 387 return context.workingctx(self)
388 388 return context.changectx(self, changeid)
389 389
390 390 def __contains__(self, changeid):
391 391 try:
392 392 return bool(self.lookup(changeid))
393 393 except error.RepoLookupError:
394 394 return False
395 395
396 396 def __nonzero__(self):
397 397 return True
398 398
399 399 def __len__(self):
400 400 return len(self.changelog)
401 401
402 402 def __iter__(self):
403 403 return iter(self.changelog)
404 404
405 405 def revs(self, expr, *args):
406 406 '''Return a list of revisions matching the given revset'''
407 407 expr = revset.formatspec(expr, *args)
408 408 m = revset.match(None, expr)
409 409 return [r for r in m(self, list(self))]
410 410
411 411 def set(self, expr, *args):
412 412 '''
413 413 Yield a context for each matching revision, after doing arg
414 414 replacement via revset.formatspec
415 415 '''
416 416 for r in self.revs(expr, *args):
417 417 yield self[r]
418 418
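# Example (illustrative sketch): revs() and set() substitute arguments via
# revset.formatspec, so callers can parameterize revsets safely:
#
#   for ctx in repo.set('heads(branch(%s))', 'default'):
#       repo.ui.write('%d\n' % ctx.rev())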
419 419 def url(self):
420 420 return 'file:' + self.root
421 421
422 422 def hook(self, name, throw=False, **args):
423 423 return hook.hook(self.ui, self, name, throw, **args)
424 424
425 425 @unfilteredmethod
426 426 def _tag(self, names, node, message, local, user, date, extra={}):
427 427 if isinstance(names, str):
428 428 names = (names,)
429 429
430 430 branches = self.branchmap()
431 431 for name in names:
432 432 self.hook('pretag', throw=True, node=hex(node), tag=name,
433 433 local=local)
434 434 if name in branches:
435 435 self.ui.warn(_("warning: tag %s conflicts with existing"
436 436 " branch name\n") % name)
437 437
438 438 def writetags(fp, names, munge, prevtags):
439 439 fp.seek(0, 2)
440 440 if prevtags and prevtags[-1] != '\n':
441 441 fp.write('\n')
442 442 for name in names:
443 443 m = munge and munge(name) or name
444 444 if (self._tagscache.tagtypes and
445 445 name in self._tagscache.tagtypes):
446 446 old = self.tags().get(name, nullid)
447 447 fp.write('%s %s\n' % (hex(old), m))
448 448 fp.write('%s %s\n' % (hex(node), m))
449 449 fp.close()
450 450
451 451 prevtags = ''
452 452 if local:
453 453 try:
454 454 fp = self.opener('localtags', 'r+')
455 455 except IOError:
456 456 fp = self.opener('localtags', 'a')
457 457 else:
458 458 prevtags = fp.read()
459 459
460 460 # local tags are stored in the current charset
461 461 writetags(fp, names, None, prevtags)
462 462 for name in names:
463 463 self.hook('tag', node=hex(node), tag=name, local=local)
464 464 return
465 465
466 466 try:
467 467 fp = self.wfile('.hgtags', 'rb+')
468 468 except IOError, e:
469 469 if e.errno != errno.ENOENT:
470 470 raise
471 471 fp = self.wfile('.hgtags', 'ab')
472 472 else:
473 473 prevtags = fp.read()
474 474
475 475 # committed tags are stored in UTF-8
476 476 writetags(fp, names, encoding.fromlocal, prevtags)
477 477
478 478 fp.close()
479 479
480 480 self.invalidatecaches()
481 481
482 482 if '.hgtags' not in self.dirstate:
483 483 self[None].add(['.hgtags'])
484 484
485 485 m = matchmod.exact(self.root, '', ['.hgtags'])
486 486 tagnode = self.commit(message, user, date, extra=extra, match=m)
487 487
488 488 for name in names:
489 489 self.hook('tag', node=hex(node), tag=name, local=local)
490 490
491 491 return tagnode
492 492
493 493 def tag(self, names, node, message, local, user, date):
494 494 '''tag a revision with one or more symbolic names.
495 495
496 496 names is a list of strings or, when adding a single tag, names may be a
497 497 string.
498 498
499 499 if local is True, the tags are stored in a per-repository file.
500 500 otherwise, they are stored in the .hgtags file, and a new
501 501 changeset is committed with the change.
502 502
503 503 keyword arguments:
504 504
505 505 local: whether to store tags in non-version-controlled file
506 506 (default False)
507 507
508 508 message: commit message to use if committing
509 509
510 510 user: name of user to use if committing
511 511
512 512 date: date tuple to use if committing'''
513 513
514 514 if not local:
515 515 for x in self.status()[:5]:
516 516 if '.hgtags' in x:
517 517 raise util.Abort(_('working copy of .hgtags is changed '
518 518 '(please commit .hgtags manually)'))
519 519
520 520 self.tags() # instantiate the cache
521 521 self._tag(names, node, message, local, user, date)
522 522
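# Example (illustrative sketch): tagging the working directory parent with
# a local (uncommitted) tag, following the docstring above:
#
#   repo.tag('mytag', repo['.'].node(), 'tagging message',
#            local=True, user=None, date=None)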
523 523 @filteredpropertycache
524 524 def _tagscache(self):
525 525 '''Returns a tagscache object that contains various tags related
526 526 caches.'''
527 527
528 528 # This simplifies its cache management by having one decorated
529 529 # function (this one) and the rest simply fetch things from it.
530 530 class tagscache(object):
531 531 def __init__(self):
532 532 # These two define the set of tags for this repository. tags
533 533 # maps tag name to node; tagtypes maps tag name to 'global' or
534 534 # 'local'. (Global tags are defined by .hgtags across all
535 535 # heads, and local tags are defined in .hg/localtags.)
536 536 # They constitute the in-memory cache of tags.
537 537 self.tags = self.tagtypes = None
538 538
539 539 self.nodetagscache = self.tagslist = None
540 540
541 541 cache = tagscache()
542 542 cache.tags, cache.tagtypes = self._findtags()
543 543
544 544 return cache
545 545
546 546 def tags(self):
547 547 '''return a mapping of tag to node'''
548 548 t = {}
549 549 if self.changelog.filteredrevs:
550 550 tags, tt = self._findtags()
551 551 else:
552 552 tags = self._tagscache.tags
553 553 for k, v in tags.iteritems():
554 554 try:
555 555 # ignore tags to unknown nodes
556 556 self.changelog.rev(v)
557 557 t[k] = v
558 558 except (error.LookupError, ValueError):
559 559 pass
560 560 return t
561 561
562 562 def _findtags(self):
563 563 '''Do the hard work of finding tags. Return a pair of dicts
564 564 (tags, tagtypes) where tags maps tag name to node, and tagtypes
565 565 maps tag name to a string like \'global\' or \'local\'.
566 566 Subclasses or extensions are free to add their own tags, but
567 567 should be aware that the returned dicts will be retained for the
568 568 duration of the localrepo object.'''
569 569
570 570 # XXX what tagtype should subclasses/extensions use? Currently
571 571 # mq and bookmarks add tags, but do not set the tagtype at all.
572 572 # Should each extension invent its own tag type? Should there
573 573 # be one tagtype for all such "virtual" tags? Or is the status
574 574 # quo fine?
575 575
576 576 alltags = {} # map tag name to (node, hist)
577 577 tagtypes = {}
578 578
579 579 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
580 580 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
581 581
582 582 # Build the return dicts. Have to re-encode tag names because
583 583 # the tags module always uses UTF-8 (in order not to lose info
584 584 # writing to the cache), but the rest of Mercurial wants them in
585 585 # local encoding.
586 586 tags = {}
587 587 for (name, (node, hist)) in alltags.iteritems():
588 588 if node != nullid:
589 589 tags[encoding.tolocal(name)] = node
590 590 tags['tip'] = self.changelog.tip()
591 591 tagtypes = dict([(encoding.tolocal(name), value)
592 592 for (name, value) in tagtypes.iteritems()])
593 593 return (tags, tagtypes)
594 594
595 595 def tagtype(self, tagname):
596 596 '''
597 597 return the type of the given tag. result can be:
598 598
599 599 'local' : a local tag
600 600 'global' : a global tag
601 601 None : tag does not exist
602 602 '''
603 603
604 604 return self._tagscache.tagtypes.get(tagname)
605 605
606 606 def tagslist(self):
607 607 '''return a list of tags ordered by revision'''
608 608 if not self._tagscache.tagslist:
609 609 l = []
610 610 for t, n in self.tags().iteritems():
611 611 r = self.changelog.rev(n)
612 612 l.append((r, t, n))
613 613 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
614 614
615 615 return self._tagscache.tagslist
616 616
617 617 def nodetags(self, node):
618 618 '''return the tags associated with a node'''
619 619 if not self._tagscache.nodetagscache:
620 620 nodetagscache = {}
621 621 for t, n in self._tagscache.tags.iteritems():
622 622 nodetagscache.setdefault(n, []).append(t)
623 623 for tags in nodetagscache.itervalues():
624 624 tags.sort()
625 625 self._tagscache.nodetagscache = nodetagscache
626 626 return self._tagscache.nodetagscache.get(node, [])
627 627
628 628 def nodebookmarks(self, node):
629 629 marks = []
630 630 for bookmark, n in self._bookmarks.iteritems():
631 631 if n == node:
632 632 marks.append(bookmark)
633 633 return sorted(marks)
634 634
635 635 def branchmap(self):
636 636 '''returns a dictionary {branch: [branchheads]}'''
637 637 branchmap.updatecache(self)
638 638 return self._branchcaches[self.filtername]
639 639
640 640
641 641 def _branchtip(self, heads):
642 642 '''return the tipmost branch head in heads'''
643 643 tip = heads[-1]
644 644 for h in reversed(heads):
645 645 if not self[h].closesbranch():
646 646 tip = h
647 647 break
648 648 return tip
649 649
650 650 def branchtip(self, branch):
651 651 '''return the tip node for a given branch'''
652 652 if branch not in self.branchmap():
653 653 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
654 654 return self._branchtip(self.branchmap()[branch])
655 655
656 656 def branchtags(self):
657 657 '''return a dict where branch names map to the tipmost head of
658 658 the branch, open heads come before closed'''
659 659 bt = {}
660 660 for bn, heads in self.branchmap().iteritems():
661 661 bt[bn] = self._branchtip(heads)
662 662 return bt
663 663
664 664 def lookup(self, key):
665 665 return self[key].node()
666 666
667 667 def lookupbranch(self, key, remote=None):
668 668 repo = remote or self
669 669 if key in repo.branchmap():
670 670 return key
671 671
672 672 repo = (remote and remote.local()) and remote or self
673 673 return repo[key].branch()
674 674
675 675 def known(self, nodes):
676 676 nm = self.changelog.nodemap
677 677 pc = self._phasecache
678 678 result = []
679 679 for n in nodes:
680 680 r = nm.get(n)
681 681 resp = not (r is None or pc.phase(self, r) >= phases.secret)
682 682 result.append(resp)
683 683 return result
684 684
685 685 def local(self):
686 686 return self
687 687
688 688 def cancopy(self):
689 689 return self.local() # so statichttprepo's override of local() works
690 690
691 691 def join(self, f):
692 692 return os.path.join(self.path, f)
693 693
694 694 def wjoin(self, f):
695 695 return os.path.join(self.root, f)
696 696
697 697 def file(self, f):
698 698 if f[0] == '/':
699 699 f = f[1:]
700 700 return filelog.filelog(self.sopener, f)
701 701
702 702 def changectx(self, changeid):
703 703 return self[changeid]
704 704
705 705 def parents(self, changeid=None):
706 706 '''get list of changectxs for parents of changeid'''
707 707 return self[changeid].parents()
708 708
709 709 def setparents(self, p1, p2=nullid):
710 710 copies = self.dirstate.setparents(p1, p2)
711 711 pctx = self[p1]
712 712 if copies:
713 713 # Adjust copy records, the dirstate cannot do it, it
714 714 # requires access to parents manifests. Preserve them
715 715 # only for entries added to first parent.
716 716 for f in copies:
717 717 if f not in pctx and copies[f] in pctx:
718 718 self.dirstate.copy(copies[f], f)
719 719 if p2 == nullid:
720 720 for f, s in sorted(self.dirstate.copies().items()):
721 721 if f not in pctx and s not in pctx:
722 722 self.dirstate.copy(None, f)
723 723
724 724 def filectx(self, path, changeid=None, fileid=None):
725 725 """changeid can be a changeset revision, node, or tag.
726 726 fileid can be a file revision or node."""
727 727 return context.filectx(self, path, changeid, fileid)
728 728
729 729 def getcwd(self):
730 730 return self.dirstate.getcwd()
731 731
732 732 def pathto(self, f, cwd=None):
733 733 return self.dirstate.pathto(f, cwd)
734 734
735 735 def wfile(self, f, mode='r'):
736 736 return self.wopener(f, mode)
737 737
738 738 def _link(self, f):
739 739 return self.wvfs.islink(f)
740 740
741 741 def _loadfilter(self, filter):
742 742 if filter not in self.filterpats:
743 743 l = []
744 744 for pat, cmd in self.ui.configitems(filter):
745 745 if cmd == '!':
746 746 continue
747 747 mf = matchmod.match(self.root, '', [pat])
748 748 fn = None
749 749 params = cmd
750 750 for name, filterfn in self._datafilters.iteritems():
751 751 if cmd.startswith(name):
752 752 fn = filterfn
753 753 params = cmd[len(name):].lstrip()
754 754 break
755 755 if not fn:
756 756 fn = lambda s, c, **kwargs: util.filter(s, c)
757 757 # Wrap old filters not supporting keyword arguments
758 758 if not inspect.getargspec(fn)[2]:
759 759 oldfn = fn
760 760 fn = lambda s, c, **kwargs: oldfn(s, c)
761 761 l.append((mf, fn, params))
762 762 self.filterpats[filter] = l
763 763 return self.filterpats[filter]
764 764
765 765 def _filter(self, filterpats, filename, data):
766 766 for mf, fn, cmd in filterpats:
767 767 if mf(filename):
768 768 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
769 769 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
770 770 break
771 771
772 772 return data
773 773
774 774 @unfilteredpropertycache
775 775 def _encodefilterpats(self):
776 776 return self._loadfilter('encode')
777 777
778 778 @unfilteredpropertycache
779 779 def _decodefilterpats(self):
780 780 return self._loadfilter('decode')
781 781
782 782 def adddatafilter(self, name, filter):
783 783 self._datafilters[name] = filter
784 784
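# Example (illustrative sketch; the command shown is a hypothetical shell
# filter): encode/decode filters are read from hgrc sections of the same
# name, each entry mapping a pattern to a filter command:
#
#   [encode]
#   **.txt = tr -d '\r'
#
# _loadfilter('encode') turns each entry into a (matcher, fn, params) tuple.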
785 785 def wread(self, filename):
786 786 if self._link(filename):
787 787 data = self.wvfs.readlink(filename)
788 788 else:
789 789 data = self.wopener.read(filename)
790 790 return self._filter(self._encodefilterpats, filename, data)
791 791
792 792 def wwrite(self, filename, data, flags):
793 793 data = self._filter(self._decodefilterpats, filename, data)
794 794 if 'l' in flags:
795 795 self.wopener.symlink(data, filename)
796 796 else:
797 797 self.wopener.write(filename, data)
798 798 if 'x' in flags:
799 799 self.wvfs.setflags(filename, False, True)
800 800
801 801 def wwritedata(self, filename, data):
802 802 return self._filter(self._decodefilterpats, filename, data)
803 803
804 804 def transaction(self, desc):
805 805 tr = self._transref and self._transref() or None
806 806 if tr and tr.running():
807 807 return tr.nest()
808 808
809 809 # abort here if the journal already exists
810 810 if self.svfs.exists("journal"):
811 811 raise error.RepoError(
812 812 _("abandoned transaction found - run hg recover"))
813 813
814 814 self._writejournal(desc)
815 815 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
816 816
817 817 tr = transaction.transaction(self.ui.warn, self.sopener,
818 818 self.sjoin("journal"),
819 819 aftertrans(renames),
820 820 self.store.createmode)
821 821 self._transref = weakref.ref(tr)
822 822 return tr
823 823
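# Example (illustrative sketch): the standard transaction pattern, as used
# by commitctx() further down in this file:
#
#   tr = repo.transaction("mychange")
#   try:
#       # ... write to the store ...
#       tr.close()
#   finally:
#       tr.release()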
824 824 def _journalfiles(self):
825 825 return ((self.svfs, 'journal'),
826 826 (self.vfs, 'journal.dirstate'),
827 827 (self.vfs, 'journal.branch'),
828 828 (self.vfs, 'journal.desc'),
829 829 (self.vfs, 'journal.bookmarks'),
830 830 (self.svfs, 'journal.phaseroots'))
831 831
832 832 def undofiles(self):
833 833 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
834 834
835 835 def _writejournal(self, desc):
836 836 self.opener.write("journal.dirstate",
837 837 self.opener.tryread("dirstate"))
838 838 self.opener.write("journal.branch",
839 839 encoding.fromlocal(self.dirstate.branch()))
840 840 self.opener.write("journal.desc",
841 841 "%d\n%s\n" % (len(self), desc))
842 842 self.opener.write("journal.bookmarks",
843 843 self.opener.tryread("bookmarks"))
844 844 self.sopener.write("journal.phaseroots",
845 845 self.sopener.tryread("phaseroots"))
846 846
847 847 def recover(self):
848 848 lock = self.lock()
849 849 try:
850 850 if self.svfs.exists("journal"):
851 851 self.ui.status(_("rolling back interrupted transaction\n"))
852 852 transaction.rollback(self.sopener, self.sjoin("journal"),
853 853 self.ui.warn)
854 854 self.invalidate()
855 855 return True
856 856 else:
857 857 self.ui.warn(_("no interrupted transaction available\n"))
858 858 return False
859 859 finally:
860 860 lock.release()
861 861
862 862 def rollback(self, dryrun=False, force=False):
863 863 wlock = lock = None
864 864 try:
865 865 wlock = self.wlock()
866 866 lock = self.lock()
867 867 if self.svfs.exists("undo"):
868 868 return self._rollback(dryrun, force)
869 869 else:
870 870 self.ui.warn(_("no rollback information available\n"))
871 871 return 1
872 872 finally:
873 873 release(lock, wlock)
874 874
875 875 @unfilteredmethod # Until we get smarter cache management
876 876 def _rollback(self, dryrun, force):
877 877 ui = self.ui
878 878 try:
879 879 args = self.opener.read('undo.desc').splitlines()
880 880 (oldlen, desc, detail) = (int(args[0]), args[1], None)
881 881 if len(args) >= 3:
882 882 detail = args[2]
883 883 oldtip = oldlen - 1
884 884
885 885 if detail and ui.verbose:
886 886 msg = (_('repository tip rolled back to revision %s'
887 887 ' (undo %s: %s)\n')
888 888 % (oldtip, desc, detail))
889 889 else:
890 890 msg = (_('repository tip rolled back to revision %s'
891 891 ' (undo %s)\n')
892 892 % (oldtip, desc))
893 893 except IOError:
894 894 msg = _('rolling back unknown transaction\n')
895 895 desc = None
896 896
897 897 if not force and self['.'] != self['tip'] and desc == 'commit':
898 898 raise util.Abort(
899 899 _('rollback of last commit while not checked out '
900 900 'may lose data'), hint=_('use -f to force'))
901 901
902 902 ui.status(msg)
903 903 if dryrun:
904 904 return 0
905 905
906 906 parents = self.dirstate.parents()
907 907 self.destroying()
908 908 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
909 909 if self.vfs.exists('undo.bookmarks'):
910 910 self.vfs.rename('undo.bookmarks', 'bookmarks')
911 911 if self.svfs.exists('undo.phaseroots'):
912 912 self.svfs.rename('undo.phaseroots', 'phaseroots')
913 913 self.invalidate()
914 914
915 915 parentgone = (parents[0] not in self.changelog.nodemap or
916 916 parents[1] not in self.changelog.nodemap)
917 917 if parentgone:
918 918 self.vfs.rename('undo.dirstate', 'dirstate')
919 919 try:
920 920 branch = self.opener.read('undo.branch')
921 921 self.dirstate.setbranch(encoding.tolocal(branch))
922 922 except IOError:
923 923 ui.warn(_('named branch could not be reset: '
924 924 'current branch is still \'%s\'\n')
925 925 % self.dirstate.branch())
926 926
927 927 self.dirstate.invalidate()
928 928 parents = tuple([p.rev() for p in self.parents()])
929 929 if len(parents) > 1:
930 930 ui.status(_('working directory now based on '
931 931 'revisions %d and %d\n') % parents)
932 932 else:
933 933 ui.status(_('working directory now based on '
934 934 'revision %d\n') % parents)
935 935 # TODO: if we know which new heads may result from this rollback, pass
936 936 # them to destroy(), which will prevent the branchhead cache from being
937 937 # invalidated.
938 938 self.destroyed()
939 939 return 0
940 940
941 941 def invalidatecaches(self):
942 942
943 943 if '_tagscache' in vars(self):
944 944 # can't use delattr on proxy
945 945 del self.__dict__['_tagscache']
946 946
947 947 self.unfiltered()._branchcaches.clear()
948 948 self.invalidatevolatilesets()
949 949
950 950 def invalidatevolatilesets(self):
951 951 self.filteredrevcache.clear()
952 952 obsolete.clearobscaches(self)
953 953
954 954 def invalidatedirstate(self):
955 955 '''Invalidates the dirstate, causing the next call to dirstate
956 956 to check if it was modified since the last time it was read,
957 957 rereading it if it has.
958 958
959 959 This is different from dirstate.invalidate() in that it doesn't
960 960 always reread the dirstate. Use dirstate.invalidate() if you want to
961 961 explicitly read the dirstate again (i.e. restoring it to a previous
962 962 known good state).'''
963 963 if hasunfilteredcache(self, 'dirstate'):
964 964 for k in self.dirstate._filecache:
965 965 try:
966 966 delattr(self.dirstate, k)
967 967 except AttributeError:
968 968 pass
969 969 delattr(self.unfiltered(), 'dirstate')
970 970
971 971 def invalidate(self):
972 972 unfiltered = self.unfiltered() # all file caches are stored unfiltered
973 973 for k in self._filecache:
974 974 # dirstate is invalidated separately in invalidatedirstate()
975 975 if k == 'dirstate':
976 976 continue
977 977
978 978 try:
979 979 delattr(unfiltered, k)
980 980 except AttributeError:
981 981 pass
982 982 self.invalidatecaches()
983 983
984 984 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
985 985 try:
986 986 l = lock.lock(lockname, 0, releasefn, desc=desc)
987 987 except error.LockHeld, inst:
988 988 if not wait:
989 989 raise
990 990 self.ui.warn(_("waiting for lock on %s held by %r\n") %
991 991 (desc, inst.locker))
992 992 # default to 600 seconds timeout
993 993 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
994 994 releasefn, desc=desc)
995 995 if acquirefn:
996 996 acquirefn()
997 997 return l
998 998
999 999 def _afterlock(self, callback):
1000 1000 """add a callback to the current repository lock.
1001 1001
1002 1002 The callback will be executed on lock release."""
1003 1003 l = self._lockref and self._lockref()
1004 1004 if l:
1005 1005 l.postrelease.append(callback)
1006 1006 else:
1007 1007 callback()
1008 1008
1009 1009 def lock(self, wait=True):
1010 1010 '''Lock the repository store (.hg/store) and return a weak reference
1011 1011 to the lock. Use this before modifying the store (e.g. committing or
1012 1012 stripping). If you are opening a transaction, get a lock as well.'''
1013 1013 l = self._lockref and self._lockref()
1014 1014 if l is not None and l.held:
1015 1015 l.lock()
1016 1016 return l
1017 1017
1018 1018 def unlock():
1019 1019 self.store.write()
1020 1020 if hasunfilteredcache(self, '_phasecache'):
1021 1021 self._phasecache.write()
1022 1022 for k, ce in self._filecache.items():
1023 1023 if k == 'dirstate' or k not in self.__dict__:
1024 1024 continue
1025 1025 ce.refresh()
1026 1026
1027 1027 l = self._lock(self.sjoin("lock"), wait, unlock,
1028 1028 self.invalidate, _('repository %s') % self.origroot)
1029 1029 self._lockref = weakref.ref(l)
1030 1030 return l
1031 1031
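# Example (illustrative sketch): the usual store-lock pattern, mirroring
# recover() above:
#
#   lock = repo.lock()
#   try:
#       pass  # ... modify the store ...
#   finally:
#       lock.release()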
1032 1032 def wlock(self, wait=True):
1033 1033 '''Lock the non-store parts of the repository (everything under
1034 1034 .hg except .hg/store) and return a weak reference to the lock.
1035 1035 Use this before modifying files in .hg.'''
1036 1036 l = self._wlockref and self._wlockref()
1037 1037 if l is not None and l.held:
1038 1038 l.lock()
1039 1039 return l
1040 1040
1041 1041 def unlock():
1042 1042 self.dirstate.write()
1043 1043 self._filecache['dirstate'].refresh()
1044 1044
1045 1045 l = self._lock(self.join("wlock"), wait, unlock,
1046 1046 self.invalidatedirstate, _('working directory of %s') %
1047 1047 self.origroot)
1048 1048 self._wlockref = weakref.ref(l)
1049 1049 return l
1050 1050
1051 1051 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1052 1052 """
1053 1053 commit an individual file as part of a larger transaction
1054 1054 """
1055 1055
1056 1056 fname = fctx.path()
1057 1057 text = fctx.data()
1058 1058 flog = self.file(fname)
1059 1059 fparent1 = manifest1.get(fname, nullid)
1060 1060 fparent2 = fparent2o = manifest2.get(fname, nullid)
1061 1061
1062 1062 meta = {}
1063 1063 copy = fctx.renamed()
1064 1064 if copy and copy[0] != fname:
1065 1065 # Mark the new revision of this file as a copy of another
1066 1066 # file. This copy data will effectively act as a parent
1067 1067 # of this new revision. If this is a merge, the first
1068 1068 # parent will be the nullid (meaning "look up the copy data")
1069 1069 # and the second one will be the other parent. For example:
1070 1070 #
1071 1071 # 0 --- 1 --- 3 rev1 changes file foo
1072 1072 # \ / rev2 renames foo to bar and changes it
1073 1073 # \- 2 -/ rev3 should have bar with all changes and
1074 1074 # should record that bar descends from
1075 1075 # bar in rev2 and foo in rev1
1076 1076 #
1077 1077 # this allows this merge to succeed:
1078 1078 #
1079 1079 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1080 1080 # \ / merging rev3 and rev4 should use bar@rev2
1081 1081 # \- 2 --- 4 as the merge base
1082 1082 #
1083 1083
1084 1084 cfname = copy[0]
1085 1085 crev = manifest1.get(cfname)
1086 1086 newfparent = fparent2
1087 1087
1088 1088 if manifest2: # branch merge
1089 1089 if fparent2 == nullid or crev is None: # copied on remote side
1090 1090 if cfname in manifest2:
1091 1091 crev = manifest2[cfname]
1092 1092 newfparent = fparent1
1093 1093
1094 1094 # find source in nearest ancestor if we've lost track
1095 1095 if not crev:
1096 1096 self.ui.debug(" %s: searching for copy revision for %s\n" %
1097 1097 (fname, cfname))
1098 1098 for ancestor in self[None].ancestors():
1099 1099 if cfname in ancestor:
1100 1100 crev = ancestor[cfname].filenode()
1101 1101 break
1102 1102
1103 1103 if crev:
1104 1104 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1105 1105 meta["copy"] = cfname
1106 1106 meta["copyrev"] = hex(crev)
1107 1107 fparent1, fparent2 = nullid, newfparent
1108 1108 else:
1109 1109 self.ui.warn(_("warning: can't find ancestor for '%s' "
1110 1110 "copied from '%s'!\n") % (fname, cfname))
1111 1111
1112 1112 elif fparent2 != nullid:
1113 1113 # is one parent an ancestor of the other?
1114 1114 fparentancestor = flog.ancestor(fparent1, fparent2)
1115 1115 if fparentancestor == fparent1:
1116 1116 fparent1, fparent2 = fparent2, nullid
1117 1117 elif fparentancestor == fparent2:
1118 1118 fparent2 = nullid
1119 1119
1120 1120 # is the file changed?
1121 1121 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1122 1122 changelist.append(fname)
1123 1123 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1124 1124
1125 1125 # are just the flags changed during merge?
1126 1126 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1127 1127 changelist.append(fname)
1128 1128
1129 1129 return fparent1
1130 1130
1131 1131 @unfilteredmethod
1132 1132 def commit(self, text="", user=None, date=None, match=None, force=False,
1133 1133 editor=False, extra={}):
1134 1134 """Add a new revision to current repository.
1135 1135
1136 1136 Revision information is gathered from the working directory,
1137 1137 match can be used to filter the committed files. If editor is
1138 1138 supplied, it is called to get a commit message.
1139 1139 """
1140 1140
1141 1141 def fail(f, msg):
1142 1142 raise util.Abort('%s: %s' % (f, msg))
1143 1143
1144 1144 if not match:
1145 1145 match = matchmod.always(self.root, '')
1146 1146
1147 1147 if not force:
1148 1148 vdirs = []
1149 1149 match.explicitdir = vdirs.append
1150 1150 match.bad = fail
1151 1151
1152 1152 wlock = self.wlock()
1153 1153 try:
1154 1154 wctx = self[None]
1155 1155 merge = len(wctx.parents()) > 1
1156 1156
1157 1157 if (not force and merge and match and
1158 1158 (match.files() or match.anypats())):
1159 1159 raise util.Abort(_('cannot partially commit a merge '
1160 1160 '(do not specify files or patterns)'))
1161 1161
1162 1162 changes = self.status(match=match, clean=force)
1163 1163 if force:
1164 1164 changes[0].extend(changes[6]) # mq may commit unchanged files
1165 1165
1166 1166 # check subrepos
1167 1167 subs = []
1168 1168 commitsubs = set()
1169 1169 newstate = wctx.substate.copy()
1170 1170 # only manage subrepos and .hgsubstate if .hgsub is present
1171 1171 if '.hgsub' in wctx:
1172 1172 # we'll decide whether to track this ourselves, thanks
1173 1173 if '.hgsubstate' in changes[0]:
1174 1174 changes[0].remove('.hgsubstate')
1175 1175 if '.hgsubstate' in changes[2]:
1176 1176 changes[2].remove('.hgsubstate')
1177 1177
1178 1178 # compare current state to last committed state
1179 1179 # build new substate based on last committed state
1180 1180 oldstate = wctx.p1().substate
1181 1181 for s in sorted(newstate.keys()):
1182 1182 if not match(s):
1183 1183 # ignore working copy, use old state if present
1184 1184 if s in oldstate:
1185 1185 newstate[s] = oldstate[s]
1186 1186 continue
1187 1187 if not force:
1188 1188 raise util.Abort(
1189 1189 _("commit with new subrepo %s excluded") % s)
1190 1190 if wctx.sub(s).dirty(True):
1191 1191 if not self.ui.configbool('ui', 'commitsubrepos'):
1192 1192 raise util.Abort(
1193 1193 _("uncommitted changes in subrepo %s") % s,
1194 1194 hint=_("use --subrepos for recursive commit"))
1195 1195 subs.append(s)
1196 1196 commitsubs.add(s)
1197 1197 else:
1198 1198 bs = wctx.sub(s).basestate()
1199 1199 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1200 1200 if oldstate.get(s, (None, None, None))[1] != bs:
1201 1201 subs.append(s)
1202 1202
1203 1203 # check for removed subrepos
1204 1204 for p in wctx.parents():
1205 1205 r = [s for s in p.substate if s not in newstate]
1206 1206 subs += [s for s in r if match(s)]
1207 1207 if subs:
1208 1208 if (not match('.hgsub') and
1209 1209 '.hgsub' in (wctx.modified() + wctx.added())):
1210 1210 raise util.Abort(
1211 1211 _("can't commit subrepos without .hgsub"))
1212 1212 changes[0].insert(0, '.hgsubstate')
1213 1213
1214 1214 elif '.hgsub' in changes[2]:
1215 1215 # clean up .hgsubstate when .hgsub is removed
1216 1216 if ('.hgsubstate' in wctx and
1217 1217 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1218 1218 changes[2].insert(0, '.hgsubstate')
1219 1219
1220 1220 # make sure all explicit patterns are matched
1221 1221 if not force and match.files():
1222 1222 matched = set(changes[0] + changes[1] + changes[2])
1223 1223
1224 1224 for f in match.files():
1225 1225 f = self.dirstate.normalize(f)
1226 1226 if f == '.' or f in matched or f in wctx.substate:
1227 1227 continue
1228 1228 if f in changes[3]: # missing
1229 1229 fail(f, _('file not found!'))
1230 1230 if f in vdirs: # visited directory
1231 1231 d = f + '/'
1232 1232 for mf in matched:
1233 1233 if mf.startswith(d):
1234 1234 break
1235 1235 else:
1236 1236 fail(f, _("no match under directory!"))
1237 1237 elif f not in self.dirstate:
1238 1238 fail(f, _("file not tracked!"))
1239 1239
1240 1240 cctx = context.workingctx(self, text, user, date, extra, changes)
1241 1241
1242 1242 if (not force and not extra.get("close") and not merge
1243 1243 and not cctx.files()
1244 1244 and wctx.branch() == wctx.p1().branch()):
1245 1245 return None
1246 1246
1247 1247 if merge and cctx.deleted():
1248 1248 raise util.Abort(_("cannot commit merge with missing files"))
1249 1249
1250 1250 ms = mergemod.mergestate(self)
1251 1251 for f in changes[0]:
1252 1252 if f in ms and ms[f] == 'u':
1253 1253 raise util.Abort(_("unresolved merge conflicts "
1254 1254 "(see hg help resolve)"))
1255 1255
1256 1256 if editor:
1257 1257 cctx._text = editor(self, cctx, subs)
1258 1258 edited = (text != cctx._text)
1259 1259
1260 1260 # commit subs and write new state
1261 1261 if subs:
1262 1262 for s in sorted(commitsubs):
1263 1263 sub = wctx.sub(s)
1264 1264 self.ui.status(_('committing subrepository %s\n') %
1265 1265 subrepo.subrelpath(sub))
1266 1266 sr = sub.commit(cctx._text, user, date)
1267 1267 newstate[s] = (newstate[s][0], sr)
1268 1268 subrepo.writestate(self, newstate)
1269 1269
1270 1270 # Save commit message in case this transaction gets rolled back
1271 1271 # (e.g. by a pretxncommit hook). Leave the content alone on
1272 1272 # the assumption that the user will use the same editor again.
1273 1273 msgfn = self.savecommitmessage(cctx._text)
1274 1274
1275 1275 p1, p2 = self.dirstate.parents()
1276 1276 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1277 1277 try:
1278 1278 self.hook("precommit", throw=True, parent1=hookp1,
1279 1279 parent2=hookp2)
1280 1280 ret = self.commitctx(cctx, True)
1281 1281 except: # re-raises
1282 1282 if edited:
1283 1283 self.ui.write(
1284 1284 _('note: commit message saved in %s\n') % msgfn)
1285 1285 raise
1286 1286
1287 1287 # update bookmarks, dirstate and mergestate
1288 1288 bookmarks.update(self, [p1, p2], ret)
1289 1289 cctx.markcommitted(ret)
1290 1290 ms.reset()
1291 1291 finally:
1292 1292 wlock.release()
1293 1293
1294 1294 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1295 1295 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1296 1296 self._afterlock(commithook)
1297 1297 return ret
1298 1298
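# Example (illustrative sketch): committing all outstanding working
# directory changes; commit() returns None when there is nothing to commit:
#
#   node = repo.commit(text='fix bug', user='user@example.com')
#   if node is None:
#       repo.ui.status('nothing changed\n')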
1299 1299 @unfilteredmethod
1300 1300 def commitctx(self, ctx, error=False):
1301 1301 """Add a new revision to current repository.
1302 1302 Revision information is passed via the context argument.
1303 1303 """
1304 1304
1305 1305 tr = lock = None
1306 1306 removed = list(ctx.removed())
1307 1307 p1, p2 = ctx.p1(), ctx.p2()
1308 1308 user = ctx.user()
1309 1309
1310 1310 lock = self.lock()
1311 1311 try:
1312 1312 tr = self.transaction("commit")
1313 1313 trp = weakref.proxy(tr)
1314 1314
1315 1315 if ctx.files():
1316 1316 m1 = p1.manifest().copy()
1317 1317 m2 = p2.manifest()
1318 1318
1319 1319 # check in files
1320 1320 new = {}
1321 1321 changed = []
1322 1322 linkrev = len(self)
1323 1323 for f in sorted(ctx.modified() + ctx.added()):
1324 1324 self.ui.note(f + "\n")
1325 1325 try:
1326 1326 fctx = ctx[f]
1327 1327 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1328 1328 changed)
1329 1329 m1.set(f, fctx.flags())
1330 1330 except OSError, inst:
1331 1331 self.ui.warn(_("trouble committing %s!\n") % f)
1332 1332 raise
1333 1333 except IOError, inst:
1334 1334 errcode = getattr(inst, 'errno', errno.ENOENT)
1335 1335 if error or errcode and errcode != errno.ENOENT:
1336 1336 self.ui.warn(_("trouble committing %s!\n") % f)
1337 1337 raise
1338 1338 else:
1339 1339 removed.append(f)
1340 1340
1341 1341 # update manifest
1342 1342 m1.update(new)
1343 1343 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1344 1344 drop = [f for f in removed if f in m1]
1345 1345 for f in drop:
1346 1346 del m1[f]
1347 1347 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1348 1348 p2.manifestnode(), (new, drop))
1349 1349 files = changed + removed
1350 1350 else:
1351 1351 mn = p1.manifestnode()
1352 1352 files = []
1353 1353
1354 1354 # update changelog
1355 1355 self.changelog.delayupdate()
1356 1356 n = self.changelog.add(mn, files, ctx.description(),
1357 1357 trp, p1.node(), p2.node(),
1358 1358 user, ctx.date(), ctx.extra().copy())
1359 1359 p = lambda: self.changelog.writepending() and self.root or ""
1360 1360 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1361 1361 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1362 1362 parent2=xp2, pending=p)
1363 1363 self.changelog.finalize(trp)
1364 1364 # set the new commit in its proper phase
1365 1365 targetphase = phases.newcommitphase(self.ui)
1366 1366 if targetphase:
1367 1367 # retracting the boundary does not alter parent changesets.
1368 1368 # if a parent has a higher phase, the resulting phase will
1369 1369 # be compliant anyway
1370 1370 #
1371 1371 # if minimal phase was 0 we don't need to retract anything
1372 1372 phases.retractboundary(self, targetphase, [n])
1373 1373 tr.close()
1374 1374 branchmap.updatecache(self.filtered('served'))
1375 1375 return n
1376 1376 finally:
1377 1377 if tr:
1378 1378 tr.release()
1379 1379 lock.release()
1380 1380
1381 1381 @unfilteredmethod
1382 1382 def destroying(self):
1383 1383 '''Inform the repository that nodes are about to be destroyed.
1384 1384 Intended for use by strip and rollback, so there's a common
1385 1385 place for anything that has to be done before destroying history.
1386 1386
1387 1387 This is mostly useful for saving state that is in memory and waiting
1388 1388 to be flushed when the current lock is released. Because a call to
1389 1389 destroyed is imminent, the repo will be invalidated causing those
1390 1390 changes to stay in memory (waiting for the next unlock), or vanish
1391 1391 completely.
1392 1392 '''
1393 1393 # When using the same lock to commit and strip, the phasecache is left
1394 1394 # dirty after committing. Then when we strip, the repo is invalidated,
1395 1395 # causing those changes to disappear.
1396 1396 if '_phasecache' in vars(self):
1397 1397 self._phasecache.write()
1398 1398
1399 1399 @unfilteredmethod
1400 1400 def destroyed(self):
1401 1401 '''Inform the repository that nodes have been destroyed.
1402 1402 Intended for use by strip and rollback, so there's a common
1403 1403 place for anything that has to be done after destroying history.
1404 1404 '''
1405 1405 # When one tries to:
1406 1406 # 1) destroy nodes thus calling this method (e.g. strip)
1407 1407 # 2) use phasecache somewhere (e.g. commit)
1408 1408 #
1409 1409 # then 2) will fail because the phasecache contains nodes that were
1410 1410 # removed. We can either remove phasecache from the filecache,
1411 1411 # causing it to reload next time it is accessed, or simply filter
1412 1412 # the removed nodes now and write the updated cache.
1413 1413 self._phasecache.filterunknown(self)
1414 1414 self._phasecache.write()
1415 1415
1416 1416 # update the 'served' branch cache to help read-only server processes
1417 1417 # Thanks to branchcache collaboration this is done from the nearest
1418 1418 # filtered subset and it is expected to be fast.
1419 1419 branchmap.updatecache(self.filtered('served'))
1420 1420
1421 1421 # Ensure the persistent tag cache is updated. Doing it now
1422 1422 # means that the tag cache only has to worry about destroyed
1423 1423 # heads immediately after a strip/rollback. That in turn
1424 1424 # guarantees that "cachetip == currenttip" (comparing both rev
1425 1425 # and node) always means no nodes have been added or destroyed.
1426 1426
1427 1427 # XXX this is suboptimal when qrefresh'ing: we strip the current
1428 1428 # head, refresh the tag cache, then immediately add a new head.
1429 1429 # But I think doing it this way is necessary for the "instant
1430 1430 # tag cache retrieval" case to work.
1431 1431 self.invalidate()
1432 1432
1433 1433 def walk(self, match, node=None):
1434 1434 '''
1435 1435 walk recursively through the directory tree or a given
1436 1436 changeset, finding all files matched by the match
1437 1437 function
1438 1438 '''
1439 1439 return self[node].walk(match)
1440 1440
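# Example (illustrative sketch): walking working-directory files that match
# a glob pattern, using the match objects built elsewhere in this file:
#
#   m = matchmod.match(repo.root, '', ['glob:**.py'])
#   for f in repo.walk(m):
#       repo.ui.write(f + '\n')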
1441 1441 def status(self, node1='.', node2=None, match=None,
1442 1442 ignored=False, clean=False, unknown=False,
1443 1443 listsubrepos=False):
1444 1444 """return status of files between two nodes or node and working
1445 1445 directory.
1446 1446
1447 1447 If node1 is None, use the first dirstate parent instead.
1448 1448 If node2 is None, compare node1 with working directory.
1449 1449 """
1450 1450
1451 1451 def mfmatches(ctx):
1452 1452 mf = ctx.manifest().copy()
1453 1453 if match.always():
1454 1454 return mf
1455 1455 for fn in mf.keys():
1456 1456 if not match(fn):
1457 1457 del mf[fn]
1458 1458 return mf
1459 1459
1460 1460 if isinstance(node1, context.changectx):
1461 1461 ctx1 = node1
1462 1462 else:
1463 1463 ctx1 = self[node1]
1464 1464 if isinstance(node2, context.changectx):
1465 1465 ctx2 = node2
1466 1466 else:
1467 1467 ctx2 = self[node2]
1468 1468
1469 1469 working = ctx2.rev() is None
1470 1470 parentworking = working and ctx1 == self['.']
1471 1471 match = match or matchmod.always(self.root, self.getcwd())
1472 1472 listignored, listclean, listunknown = ignored, clean, unknown
1473 1473
1474 1474 # load earliest manifest first for caching reasons
1475 1475 if not working and ctx2.rev() < ctx1.rev():
1476 1476 ctx2.manifest()
1477 1477
1478 1478 if not parentworking:
1479 1479 def bad(f, msg):
1480 1480 # 'f' may be a directory pattern from 'match.files()',
1481 1481 # so 'f not in ctx1' is not enough
1482 1482 if f not in ctx1 and f not in ctx1.dirs():
1483 1483 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1484 1484 match.bad = bad
1485 1485
1486 1486 if working: # we need to scan the working dir
1487 1487 subrepos = []
1488 1488 if '.hgsub' in self.dirstate:
1489 1489 subrepos = sorted(ctx2.substate)
1490 1490 s = self.dirstate.status(match, subrepos, listignored,
1491 1491 listclean, listunknown)
1492 1492 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1493 1493
1494 1494 # check for any possibly clean files
1495 1495 if parentworking and cmp:
1496 1496 fixup = []
1497 1497 # do a full compare of any files that might have changed
1498 1498 for f in sorted(cmp):
1499 1499 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1500 1500 or ctx1[f].cmp(ctx2[f])):
1501 1501 modified.append(f)
1502 1502 else:
1503 1503 fixup.append(f)
1504 1504
1505 1505 # update dirstate for files that are actually clean
1506 1506 if fixup:
1507 1507 if listclean:
1508 1508 clean += fixup
1509 1509
1510 1510 try:
1511 1511 # updating the dirstate is optional
1512 1512 # so we don't wait on the lock
1513 1513 wlock = self.wlock(False)
1514 1514 try:
1515 1515 for f in fixup:
1516 1516 self.dirstate.normal(f)
1517 1517 finally:
1518 1518 wlock.release()
1519 1519 except error.LockError:
1520 1520 pass
1521 1521
1522 1522 if not parentworking:
1523 1523 mf1 = mfmatches(ctx1)
1524 1524 if working:
1525 1525 # we are comparing working dir against non-parent
1526 1526 # generate a pseudo-manifest for the working dir
1527 1527 mf2 = mfmatches(self['.'])
1528 1528 for f in cmp + modified + added:
1529 1529 mf2[f] = None
1530 1530 mf2.set(f, ctx2.flags(f))
1531 1531 for f in removed:
1532 1532 if f in mf2:
1533 1533 del mf2[f]
1534 1534 else:
1535 1535 # we are comparing two revisions
1536 1536 deleted, unknown, ignored = [], [], []
1537 1537 mf2 = mfmatches(ctx2)
1538 1538
1539 1539 modified, added, clean = [], [], []
1540 1540 withflags = mf1.withflags() | mf2.withflags()
1541 1541 for fn, mf2node in mf2.iteritems():
1542 1542 if fn in mf1:
1543 1543 if (fn not in deleted and
1544 1544 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1545 1545 (mf1[fn] != mf2node and
1546 1546 (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
1547 1547 modified.append(fn)
1548 1548 elif listclean:
1549 1549 clean.append(fn)
1550 1550 del mf1[fn]
1551 1551 elif fn not in deleted:
1552 1552 added.append(fn)
1553 1553 removed = mf1.keys()
1554 1554
1555 1555 if working and modified and not self.dirstate._checklink:
1556 1556 # Symlink placeholders may get non-symlink-like contents
1557 1557 # via user error or dereferencing by NFS or Samba servers,
1558 1558 # so we filter out any placeholders that don't look like a
1559 1559 # symlink
1560 1560 sane = []
1561 1561 for f in modified:
1562 1562 if ctx2.flags(f) == 'l':
1563 1563 d = ctx2[f].data()
1564 1564 if len(d) >= 1024 or '\n' in d or util.binary(d):
1565 1565 self.ui.debug('ignoring suspect symlink placeholder'
1566 1566 ' "%s"\n' % f)
1567 1567 continue
1568 1568 sane.append(f)
1569 1569 modified = sane
1570 1570
1571 1571 r = modified, added, removed, deleted, unknown, ignored, clean
1572 1572
1573 1573 if listsubrepos:
1574 1574 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1575 1575 if working:
1576 1576 rev2 = None
1577 1577 else:
1578 1578 rev2 = ctx2.substate[subpath][1]
1579 1579 try:
1580 1580 submatch = matchmod.narrowmatcher(subpath, match)
1581 1581 s = sub.status(rev2, match=submatch, ignored=listignored,
1582 1582 clean=listclean, unknown=listunknown,
1583 1583 listsubrepos=True)
1584 1584 for rfiles, sfiles in zip(r, s):
1585 1585 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1586 1586 except error.LookupError:
1587 1587 self.ui.status(_("skipping missing subrepository: %s\n")
1588 1588 % subpath)
1589 1589
1590 1590 for l in r:
1591 1591 l.sort()
1592 1592 return r
1593 1593
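The symlink filtering above reduces to a small content heuristic: a plausible link target is short, single-line, non-binary text. A standalone sketch of that test (illustrative only; util.binary is approximated here by a NUL-byte check):

    def is_plausible_symlink_target(data):
        # mirrors the checks in status(): placeholder content that is
        # too long, multi-line, or binary cannot be a link target
        if len(data) >= 1024:
            return False
        if '\n' in data:
            return False
        if '\0' in data:  # crude stand-in for util.binary()
            return False
        return True

Anything failing this test is reported as a suspect placeholder and dropped from the modified list rather than treated as a real change.
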
1594 1594 def heads(self, start=None):
1595 1595 heads = self.changelog.heads(start)
1596 1596 # sort the output in rev descending order
1597 1597 return sorted(heads, key=self.changelog.rev, reverse=True)
1598 1598
1599 1599 def branchheads(self, branch=None, start=None, closed=False):
1600 1600 '''return a (possibly filtered) list of heads for the given branch
1601 1601
1602 1602 Heads are returned in topological order, from newest to oldest.
1603 1603 If branch is None, use the dirstate branch.
1604 1604 If start is not None, return only heads reachable from start.
1605 1605 If closed is True, return heads that are marked as closed as well.
1606 1606 '''
1607 1607 if branch is None:
1608 1608 branch = self[None].branch()
1609 1609 branches = self.branchmap()
1610 1610 if branch not in branches:
1611 1611 return []
1612 1612 # the cache returns heads ordered lowest to highest
1613 1613 bheads = list(reversed(branches[branch]))
1614 1614 if start is not None:
1615 1615 # filter out the heads that cannot be reached from startrev
1616 1616 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1617 1617 bheads = [h for h in bheads if h in fbheads]
1618 1618 if not closed:
1619 1619 bheads = [h for h in bheads if not self[h].closesbranch()]
1620 1620 return bheads
1621 1621
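As the docstring above says, branchheads() post-processes the cached heads in three steps: reverse to newest-first order, drop heads unreachable from start, and drop closed heads unless asked for. A condensed sketch of that pipeline (the function and parameter names here are illustrative):

    def filter_branch_heads(cached, reachable=None, closed=False,
                            closes=lambda h: False):
        # the cache stores heads ordered lowest to highest
        heads = list(reversed(cached))
        if reachable is not None:
            # keep only heads reachable from the start revision
            heads = [h for h in heads if h in reachable]
        if not closed:
            heads = [h for h in heads if not closes(h)]
        return heads
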
1622 1622 def branches(self, nodes):
1623 1623 if not nodes:
1624 1624 nodes = [self.changelog.tip()]
1625 1625 b = []
1626 1626 for n in nodes:
1627 1627 t = n
1628 1628 while True:
1629 1629 p = self.changelog.parents(n)
1630 1630 if p[1] != nullid or p[0] == nullid:
1631 1631 b.append((t, n, p[0], p[1]))
1632 1632 break
1633 1633 n = p[0]
1634 1634 return b
1635 1635
1636 1636 def between(self, pairs):
1637 1637 r = []
1638 1638
1639 1639 for top, bottom in pairs:
1640 1640 n, l, i = top, [], 0
1641 1641 f = 1
1642 1642
1643 1643 while n != bottom and n != nullid:
1644 1644 p = self.changelog.parents(n)[0]
1645 1645 if i == f:
1646 1646 l.append(n)
1647 1647 f = f * 2
1648 1648 n = p
1649 1649 i += 1
1650 1650
1651 1651 r.append(l)
1652 1652
1653 1653 return r
1654 1654
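between() samples each first-parent chain at distances 1, 2, 4, 8, ... from the top node, so the reply stays logarithmic in the chain length. A self-contained sketch of the same walk over a plain parent-pointer dict (names are illustrative, not part of the wire protocol):

    def sample_chain(firstparent, top, bottom):
        # record every node whose distance from top is a power of two
        n, out, i, f = top, [], 0, 1
        while n != bottom and n is not None:
            if i == f:
                out.append(n)
                f *= 2
            n = firstparent.get(n)
            i += 1
        return out

    # a linear chain e -> d -> c -> b -> a yields the nodes at
    # distances 1 and 2 before hitting bottom:
    # sample_chain({'e': 'd', 'd': 'c', 'c': 'b', 'b': 'a'}, 'e', 'a')
    # == ['d', 'c']
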
1655 1655 def pull(self, remote, heads=None, force=False):
1656 1656 # don't open a transaction for nothing or you break future
1657 1657 # useful rollback calls
1658 1658 tr = None
1659 1659 trname = 'pull\n' + util.hidepassword(remote.url())
1660 1660 lock = self.lock()
1661 1661 try:
1662 1662 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1663 1663 force=force)
1664 1664 common, fetch, rheads = tmp
1665 1665 if not fetch:
1666 1666 self.ui.status(_("no changes found\n"))
1667 1667 added = []
1668 1668 result = 0
1669 1669 else:
1670 1670 tr = self.transaction(trname)
1671 1671 if heads is None and list(common) == [nullid]:
1672 1672 self.ui.status(_("requesting all changes\n"))
1673 1673 elif heads is None and remote.capable('changegroupsubset'):
1674 1674 # issue1320, avoid a race if remote changed after discovery
1675 1675 heads = rheads
1676 1676
1677 1677 if remote.capable('getbundle'):
1678 1678 # TODO: get bundlecaps from remote
1679 1679 cg = remote.getbundle('pull', common=common,
1680 1680 heads=heads or rheads)
1681 1681 elif heads is None:
1682 1682 cg = remote.changegroup(fetch, 'pull')
1683 1683 elif not remote.capable('changegroupsubset'):
1684 1684 raise util.Abort(_("partial pull cannot be done because "
1685 1685 "other repository doesn't support "
1686 1686 "changegroupsubset."))
1687 1687 else:
1688 1688 cg = remote.changegroupsubset(fetch, heads, 'pull')
1689 1689 # we use the unfiltered changelog here because hidden revisions
1690 1690 # must be taken into account for phase synchronization. They may
1691 1691 # become public and visible again.
1692 1692 cl = self.unfiltered().changelog
1693 1693 clstart = len(cl)
1694 1694 result = self.addchangegroup(cg, 'pull', remote.url())
1695 1695 clend = len(cl)
1696 1696 added = [cl.node(r) for r in xrange(clstart, clend)]
1697 1697
1698 1698 # compute target subset
1699 1699 if heads is None:
1700 1700 # We pulled everything possible
1701 1701 # sync on everything common
1702 1702 subset = common + added
1703 1703 else:
1704 1704 # We pulled a specific subset
1705 1705 # sync on this subset
1706 1706 subset = heads
1707 1707
1708 1708 # Get phase data from the remote
1709 1709 remotephases = remote.listkeys('phases')
1710 1710 publishing = bool(remotephases.get('publishing', False))
1711 1711 if remotephases and not publishing:
1712 1712 # remote is new and non-publishing
1713 1713 pheads, _dr = phases.analyzeremotephases(self, subset,
1714 1714 remotephases)
1715 1715 phases.advanceboundary(self, phases.public, pheads)
1716 1716 phases.advanceboundary(self, phases.draft, subset)
1717 1717 else:
1718 1718 # Remote is old or publishing; all common changesets
1719 1719 # should be seen as public
1720 1720 phases.advanceboundary(self, phases.public, subset)
1721 1721
1722 1722 def gettransaction():
1723 1723 if tr is None:
1724 1724 return self.transaction(trname)
1725 1725 return tr
1726 1726
1727 1727 obstr = obsolete.syncpull(self, remote, gettransaction)
1728 1728 if obstr is not None:
1729 1729 tr = obstr
1730 1730
1731 1731 if tr is not None:
1732 1732 tr.close()
1733 1733 finally:
1734 1734 if tr is not None:
1735 1735 tr.release()
1736 1736 lock.release()
1737 1737
1738 1738 return result
1739 1739
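The phase handling at the end of pull() reduces to one decision: a phase-aware, non-publishing remote gets a fine-grained sync (its public heads become public here, the rest of the pulled subset becomes draft), while anything else makes the whole subset public. A minimal sketch of that decision (constants and names are stand-ins, not the mercurial.phases API):

    PUBLIC, DRAFT = 0, 1  # stand-ins for phases.public / phases.draft

    def plan_phase_moves(remotephases, subset, pheads):
        # return the (phase, nodes) boundary advances done by pull()
        publishing = bool(remotephases.get('publishing', False))
        if remotephases and not publishing:
            # remote is phase-aware and non-publishing
            return [(PUBLIC, pheads), (DRAFT, subset)]
        # old server, or a publishing one: everything common is public
        return [(PUBLIC, subset)]
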
1740 1740 def checkpush(self, force, revs):
1741 1741 """Extensions can override this function if additional checks have
1742 1742 to be performed before pushing, or call it if they override the
1743 1743 push command.
1744 1744 """
1745 1745 pass
1746 1746
1747 1747 def push(self, remote, force=False, revs=None, newbranch=False):
1748 1748 '''Push outgoing changesets (limited by revs) from the current
1749 1749 repository to remote. Return an integer:
1750 1750 - None means nothing to push
1751 1751 - 0 means HTTP error
1752 1752 - 1 means we pushed and remote head count is unchanged *or*
1753 1753 we have outgoing changesets but refused to push
1754 1754 - other values as described by addchangegroup()
1755 1755 '''
1756 1756 # there are two ways to push to remote repo:
1757 1757 #
1758 1758 # addchangegroup assumes local user can lock remote
1759 1759 # repo (local filesystem, old ssh servers).
1760 1760 #
1761 1761 # unbundle assumes local user cannot lock remote repo (new ssh
1762 1762 # servers, http servers).
1763 1763
1764 1764 if not remote.canpush():
1765 1765 raise util.Abort(_("destination does not support push"))
1766 1766 unfi = self.unfiltered()
1767 1767 def localphasemove(nodes, phase=phases.public):
1768 1768 """move <nodes> to <phase> in the local source repo"""
1769 1769 if locallock is not None:
1770 1770 phases.advanceboundary(self, phase, nodes)
1771 1771 else:
1772 1772 # repo is not locked, do not change any phases!
1773 1773 # Informs the user that phases should have been moved when
1774 1774 # applicable.
1775 1775 actualmoves = [n for n in nodes if phase < self[n].phase()]
1776 1776 phasestr = phases.phasenames[phase]
1777 1777 if actualmoves:
1778 1778 self.ui.status(_('cannot lock source repo, skipping local'
1779 1779 ' %s phase update\n') % phasestr)
1780 1780 # get local lock as we might write phase data
1781 1781 locallock = None
1782 1782 try:
1783 1783 locallock = self.lock()
1784 1784 except IOError, err:
1785 1785 if err.errno != errno.EACCES:
1786 1786 raise
1787 1787 # source repo cannot be locked.
1788 1788 # We do not abort the push, but just disable the local phase
1789 1789 # synchronisation.
1790 1790 msg = 'cannot lock source repository: %s\n' % err
1791 1791 self.ui.debug(msg)
1792 1792 try:
1793 1793 self.checkpush(force, revs)
1794 1794 lock = None
1795 1795 unbundle = remote.capable('unbundle')
1796 1796 if not unbundle:
1797 1797 lock = remote.lock()
1798 1798 try:
1799 1799 # discovery
1800 1800 fci = discovery.findcommonincoming
1801 1801 commoninc = fci(unfi, remote, force=force)
1802 1802 common, inc, remoteheads = commoninc
1803 1803 fco = discovery.findcommonoutgoing
1804 1804 outgoing = fco(unfi, remote, onlyheads=revs,
1805 1805 commoninc=commoninc, force=force)
1806 1806
1807 1807
1808 1808 if not outgoing.missing:
1809 1809 # nothing to push
1810 1810 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1811 1811 ret = None
1812 1812 else:
1813 1813 # something to push
1814 1814 if not force:
1815 1815 # if self.obsstore is empty --> there are no obsolete
1816 1816 # changesets, so we can skip the iteration
1817 1817 if unfi.obsstore:
1818 1818 # these messages are defined here for the 80-char limit
1819 1819 mso = _("push includes obsolete changeset: %s!")
1820 1820 mst = "push includes %s changeset: %s!"
1821 1821 # plain versions for the i18n tool to detect them
1822 1822 _("push includes unstable changeset: %s!")
1823 1823 _("push includes bumped changeset: %s!")
1824 1824 _("push includes divergent changeset: %s!")
1825 1825 # If there is at least one obsolete or unstable
1826 1826 # changeset in missing, at least one of the missing
1827 1827 # heads will be obsolete or unstable, so checking
1828 1828 # the heads only is enough
1829 1829 for node in outgoing.missingheads:
1830 1830 ctx = unfi[node]
1831 1831 if ctx.obsolete():
1832 1832 raise util.Abort(mso % ctx)
1833 1833 elif ctx.troubled():
1834 1834 raise util.Abort(_(mst)
1835 1835 % (ctx.troubles()[0],
1836 1836 ctx))
1837 1837 discovery.checkheads(unfi, remote, outgoing,
1838 1838 remoteheads, newbranch,
1839 1839 bool(inc))
1840 1840
1841 1841 # TODO: get bundlecaps from remote
1842 1842 bundlecaps = None
1843 1843 # create a changegroup from local
1844 1844 if revs is None and not outgoing.excluded:
1845 1845 # push everything,
1846 1846 # use the fast path, no race possible on push
1847 1847 bundler = changegroup.bundle10(self, bundlecaps)
1848 1848 cg = self._changegroupsubset(outgoing,
1849 1849 bundler,
1850 1850 'push',
1851 1851 fastpath=True)
1852 1852 else:
1853 1853 cg = self.getlocalbundle('push', outgoing, bundlecaps)
1854 1854
1855 1855 # apply changegroup to remote
1856 1856 if unbundle:
1857 1857 # the local repo finds heads on the server and figures out
1858 1858 # what revs it must push. Once the revs are transferred, if
1859 1859 # the server finds it has different heads (someone else won
1860 1860 # the commit/push race), it aborts.
1861 1861 if force:
1862 1862 remoteheads = ['force']
1863 1863 # ssh: return remote's addchangegroup()
1864 1864 # http: return remote's addchangegroup() or 0 for error
1865 1865 ret = remote.unbundle(cg, remoteheads, 'push')
1866 1866 else:
1867 1867 # we return an integer indicating remote head count
1868 1868 # change
1869 1869 ret = remote.addchangegroup(cg, 'push', self.url())
1870 1870
1871 1871 if ret:
1872 1872 # push succeeded, synchronize the target of the push
1873 1873 cheads = outgoing.missingheads
1874 1874 elif revs is None:
1875 1875 # All-out push failed; synchronize all common
1876 1876 cheads = outgoing.commonheads
1877 1877 else:
1878 1878 # I want cheads = heads(::missingheads and ::commonheads)
1879 1879 # (missingheads is revs with secret changeset filtered out)
1880 1880 #
1881 1881 # This can be expressed as:
1882 1882 # cheads = ( (missingheads and ::commonheads)
1883 1883 # + (commonheads and ::missingheads))
1885 1885 #
1886 1886 # while trying to push we already computed the following:
1887 1887 # common = (::commonheads)
1888 1888 # missing = ((commonheads::missingheads) - commonheads)
1889 1889 #
1890 1890 # We can pick:
1891 1891 # * missingheads part of common (::commonheads)
1892 1892 common = set(outgoing.common)
1893 1893 cheads = [node for node in revs if node in common]
1894 1894 # and
1895 1895 # * commonheads parents on missing
1896 1896 revset = unfi.set('%ln and parents(roots(%ln))',
1897 1897 outgoing.commonheads,
1898 1898 outgoing.missing)
1899 1899 cheads.extend(c.node() for c in revset)
1900 1900 # even when we don't push, exchanging phase data is useful
1901 1901 remotephases = remote.listkeys('phases')
1902 1902 if (self.ui.configbool('ui', '_usedassubrepo', False)
1903 1903 and remotephases # server supports phases
1904 1904 and ret is None # nothing was pushed
1905 1905 and remotephases.get('publishing', False)):
1906 1906 # When:
1907 1907 # - this is a subrepo push
1908 1908 # - and the remote supports phases
1909 1909 # - and no changeset was pushed
1910 1910 # - and remote is publishing
1911 1911 # We may be in the issue 3871 case!
1912 1912 # We drop the courtesy phase synchronisation in order to
1913 1913 # publish changesets that are on the remote but possibly
1914 1914 # still draft locally.
1915 1915 remotephases = {'publishing': 'True'}
1916 1916 if not remotephases: # old server or public only repo
1917 1917 localphasemove(cheads)
1918 1918 # don't push any phase data as there is nothing to push
1919 1919 else:
1920 1920 ana = phases.analyzeremotephases(self, cheads, remotephases)
1921 1921 pheads, droots = ana
1922 1922 ### Apply remote phase on local
1923 1923 if remotephases.get('publishing', False):
1924 1924 localphasemove(cheads)
1925 1925 else: # publish = False
1926 1926 localphasemove(pheads)
1927 1927 localphasemove(cheads, phases.draft)
1928 1928 ### Apply local phase on remote
1929 1929
1930 1930 # Get the list of all revs draft on remote but public here.
1931 1931 # XXX Beware that the revset breaks if droots is not strictly
1932 1932 # XXX roots; we may want to ensure it is, but that is costly
1933 1933 outdated = unfi.set('heads((%ln::%ln) and public())',
1934 1934 droots, cheads)
1935 1935 for newremotehead in outdated:
1936 1936 r = remote.pushkey('phases',
1937 1937 newremotehead.hex(),
1938 1938 str(phases.draft),
1939 1939 str(phases.public))
1940 1940 if not r:
1941 1941 self.ui.warn(_('updating %s to public failed!\n')
1942 1942 % newremotehead)
1943 1943 self.ui.debug('try to push obsolete markers to remote\n')
1944 1944 obsolete.syncpush(self, remote)
1945 1945 finally:
1946 1946 if lock is not None:
1947 1947 lock.release()
1948 1948 finally:
1949 1949 if locallock is not None:
1950 1950 locallock.release()
1951 1951
1952 1952 self.ui.debug("checking for updated bookmarks\n")
1953 1953 rb = remote.listkeys('bookmarks')
1954 1954 for k in rb.keys():
1955 1955 if k in unfi._bookmarks:
1956 1956 nr, nl = rb[k], hex(self._bookmarks[k])
1957 1957 if nr in unfi:
1958 1958 cr = unfi[nr]
1959 1959 cl = unfi[nl]
1960 1960 if bookmarks.validdest(unfi, cr, cl):
1961 1961 r = remote.pushkey('bookmarks', k, nr, nl)
1962 1962 if r:
1963 1963 self.ui.status(_("updating bookmark %s\n") % k)
1964 1964 else:
1965 1965 self.ui.warn(_('updating bookmark %s'
1966 1966 ' failed!\n') % k)
1967 1967
1968 1968 return ret
1969 1969
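The integer contract described in the push() docstring (and produced by addchangegroup() below) packs several outcomes into one value. A small helper showing how a caller might decode it (hypothetical, not part of the API):

    def describe_push_result(ret):
        # decode the integer documented by localrepository.push()
        if ret is None:
            return 'nothing to push'
        if ret == 0:
            return 'HTTP error'
        if ret == 1:
            return 'pushed; remote head count unchanged (or push refused)'
        if ret > 1:
            return 'pushed; %d head(s) added on remote' % (ret - 1)
        return 'pushed; %d head(s) removed on remote' % (-ret - 1)
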
1970 1970 def changegroupinfo(self, nodes, source):
1971 1971 if self.ui.verbose or source == 'bundle':
1972 1972 self.ui.status(_("%d changesets found\n") % len(nodes))
1973 1973 if self.ui.debugflag:
1974 1974 self.ui.debug("list of changesets:\n")
1975 1975 for node in nodes:
1976 1976 self.ui.debug("%s\n" % hex(node))
1977 1977
1978 1978 def changegroupsubset(self, bases, heads, source):
1979 1979 """Compute a changegroup consisting of all the nodes that are
1980 1980 descendants of any of the bases and ancestors of any of the heads.
1981 1981 Return a chunkbuffer object whose read() method will return
1982 1982 successive changegroup chunks.
1983 1983
1984 1984 It is fairly complex as determining which filenodes and which
1985 1985 manifest nodes need to be included for the changeset to be complete
1986 1986 is non-trivial.
1987 1987
1988 1988 Another wrinkle is doing the reverse, figuring out which changeset in
1989 1989 the changegroup a particular filenode or manifestnode belongs to.
1990 1990 """
1991 1991 cl = self.changelog
1992 1992 if not bases:
1993 1993 bases = [nullid]
1994 1994 # TODO: remove call to nodesbetween.
1995 1995 csets, bases, heads = cl.nodesbetween(bases, heads)
1996 1996 bases = [p for n in bases for p in cl.parents(n) if p != nullid]
1997 1997 outgoing = discovery.outgoing(cl, bases, heads)
1998 1998 bundler = changegroup.bundle10(self)
1999 1999 return self._changegroupsubset(outgoing, bundler, source)
2000 2000
2001 2001 def getlocalbundle(self, source, outgoing, bundlecaps=None):
2002 2002 """Like getbundle, but taking a discovery.outgoing as an argument.
2003 2003
2004 2004 This is only implemented for local repos and reuses potentially
2005 2005 precomputed sets in outgoing."""
2006 2006 if not outgoing.missing:
2007 2007 return None
2008 2008 bundler = changegroup.bundle10(self, bundlecaps)
2009 2009 return self._changegroupsubset(outgoing, bundler, source)
2010 2010
2011 2011 def getbundle(self, source, heads=None, common=None, bundlecaps=None):
2012 2012 """Like changegroupsubset, but returns the set difference between the
2013 2013 ancestors of heads and the ancestors of common.
2014 2014
2015 2015 If heads is None, use the local heads. If common is None, use [nullid].
2016 2016
2017 2017 The nodes in common might not all be known locally due to the way the
2018 2018 current discovery protocol works.
2019 2019 """
2020 2020 cl = self.changelog
2021 2021 if common:
2022 2022 hasnode = cl.hasnode
2023 2023 common = [n for n in common if hasnode(n)]
2024 2024 else:
2025 2025 common = [nullid]
2026 2026 if not heads:
2027 2027 heads = cl.heads()
2028 2028 return self.getlocalbundle(source,
2029 2029 discovery.outgoing(cl, common, heads),
2030 2030 bundlecaps=bundlecaps)
2031 2031
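getbundle()'s defaulting is easy to miss: unknown common nodes are silently dropped (the discovery protocol may hand us nodes we do not have), and empty arguments fall back to "everything". A sketch of just that normalization (NULLID models node.nullid as 20 NUL bytes; names are illustrative):

    NULLID = '\0' * 20  # mirrors node.nullid

    def normalize_bundle_args(heads, common, localheads, hasnode):
        # drop common nodes we do not know about; default to
        # "everything since null" and "all local heads"
        if common:
            common = [n for n in common if hasnode(n)]
        else:
            common = [NULLID]
        if not heads:
            heads = list(localheads)
        return heads, common
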
2032 2032 @unfilteredmethod
2033 2033 def _changegroupsubset(self, outgoing, bundler, source,
2034 2034 fastpath=False):
2035 2035 commonrevs = outgoing.common
2036 2036 csets = outgoing.missing
2037 2037 heads = outgoing.missingheads
2038 2038 # We go through the fast path if we get told to, or if all (unfiltered)
2039 2039 # heads have been requested (since we then know that all linkrevs will
2040 2040 # be pulled by the client).
2041 2041 heads.sort()
2042 2042 fastpathlinkrev = fastpath or (
2043 2043 self.filtername is None and heads == sorted(self.heads()))
2044 2044
2045 2045 self.hook('preoutgoing', throw=True, source=source)
2046 2046 self.changegroupinfo(csets, source)
2047 2047 gengroup = bundler.generate(commonrevs, csets, fastpathlinkrev, source)
2048 2048 return changegroup.unbundle10(util.chunkbuffer(gengroup), 'UN')
2049 2049
2050 2050 def changegroup(self, basenodes, source):
2051 2051 # to avoid a race we use changegroupsubset() (issue1320)
2052 2052 return self.changegroupsubset(basenodes, self.heads(), source)
2053 2053
2054 2054 @unfilteredmethod
2055 2055 def addchangegroup(self, source, srctype, url, emptyok=False):
2056 2056 """Add the changegroup returned by source.read() to this repo.
2057 2057 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2058 2058 the URL of the repo where this changegroup is coming from.
2059 2059
2060 2060 Return an integer summarizing the change to this repo:
2061 2061 - nothing changed or no source: 0
2062 2062 - more heads than before: 1+added heads (2..n)
2063 2063 - fewer heads than before: -1-removed heads (-2..-n)
2064 2064 - number of heads stays the same: 1
2065 2065 """
2066 2066 def csmap(x):
2067 2067 self.ui.debug("add changeset %s\n" % short(x))
2068 2068 return len(cl)
2069 2069
2070 2070 def revmap(x):
2071 2071 return cl.rev(x)
2072 2072
2073 2073 if not source:
2074 2074 return 0
2075 2075
2076 2076 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2077 2077
2078 2078 changesets = files = revisions = 0
2079 2079 efiles = set()
2080 2080
2081 2081 # write changelog data to temp files so concurrent readers will not see
2082 2082 # an inconsistent view
2083 2083 cl = self.changelog
2084 2084 cl.delayupdate()
2085 2085 oldheads = cl.heads()
2086 2086
2087 2087 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2088 2088 try:
2089 2089 trp = weakref.proxy(tr)
2090 2090 # pull off the changeset group
2091 2091 self.ui.status(_("adding changesets\n"))
2092 2092 clstart = len(cl)
2093 2093 class prog(object):
2094 2094 step = _('changesets')
2095 2095 count = 1
2096 2096 ui = self.ui
2097 2097 total = None
2098 2098 def __call__(self):
2099 2099 self.ui.progress(self.step, self.count, unit=_('chunks'),
2100 2100 total=self.total)
2101 2101 self.count += 1
2102 2102 pr = prog()
2103 2103 source.callback = pr
2104 2104
2105 2105 source.changelogheader()
2106 2106 srccontent = cl.addgroup(source, csmap, trp)
2107 2107 if not (srccontent or emptyok):
2108 2108 raise util.Abort(_("received changelog group is empty"))
2109 2109 clend = len(cl)
2110 2110 changesets = clend - clstart
2111 2111 for c in xrange(clstart, clend):
2112 2112 efiles.update(self[c].files())
2113 2113 efiles = len(efiles)
2114 2114 self.ui.progress(_('changesets'), None)
2115 2115
2116 2116 # pull off the manifest group
2117 2117 self.ui.status(_("adding manifests\n"))
2118 2118 pr.step = _('manifests')
2119 2119 pr.count = 1
2120 2120 pr.total = changesets # manifests <= changesets
2121 2121 # no need to check for empty manifest group here:
2122 2122 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2123 2123 # no new manifest will be created and the manifest group will
2124 2124 # be empty during the pull
2125 2125 source.manifestheader()
2126 2126 self.manifest.addgroup(source, revmap, trp)
2127 2127 self.ui.progress(_('manifests'), None)
2128 2128
2129 2129 needfiles = {}
2130 2130 if self.ui.configbool('server', 'validate', default=False):
2131 2131 # validate incoming csets have their manifests
2132 2132 for cset in xrange(clstart, clend):
2133 2133 mfest = self.changelog.read(self.changelog.node(cset))[0]
2134 2134 mfest = self.manifest.readdelta(mfest)
2135 2135 # store file nodes we must see
2136 2136 for f, n in mfest.iteritems():
2137 2137 needfiles.setdefault(f, set()).add(n)
2138 2138
2139 2139 # process the files
2140 2140 self.ui.status(_("adding file changes\n"))
2141 2141 pr.step = _('files')
2142 2142 pr.count = 1
2143 2143 pr.total = efiles
2144 2144 source.callback = None
2145 2145
2146 while True:
2147 chunkdata = source.filelogheader()
2148 if not chunkdata:
2149 break
2150 f = chunkdata["filename"]
2151 self.ui.debug("adding %s revisions\n" % f)
2152 pr()
2153 fl = self.file(f)
2154 o = len(fl)
2155 if not fl.addgroup(source, revmap, trp):
2156 raise util.Abort(_("received file revlog group is empty"))
2157 revisions += len(fl) - o
2158 files += 1
2159 if f in needfiles:
2160 needs = needfiles[f]
2161 for new in xrange(o, len(fl)):
2162 n = fl.node(new)
2163 if n in needs:
2164 needs.remove(n)
2165 else:
2166 raise util.Abort(
2167 _("received spurious file revlog entry"))
2168 if not needs:
2169 del needfiles[f]
2170 self.ui.progress(_('files'), None)
2171
2172 for f, needs in needfiles.iteritems():
2173 fl = self.file(f)
2174 for n in needs:
2175 try:
2176 fl.rev(n)
2177 except error.LookupError:
2178 raise util.Abort(
2179 _('missing file data for %s:%s - run hg verify') %
2180 (f, hex(n)))
2146 newrevs, newfiles = self.addchangegroupfiles(source, revmap, trp,
2147 pr, needfiles)
2148 revisions += newrevs
2149 files += newfiles
2181 2150
2182 2151 dh = 0
2183 2152 if oldheads:
2184 2153 heads = cl.heads()
2185 2154 dh = len(heads) - len(oldheads)
2186 2155 for h in heads:
2187 2156 if h not in oldheads and self[h].closesbranch():
2188 2157 dh -= 1
2189 2158 htext = ""
2190 2159 if dh:
2191 2160 htext = _(" (%+d heads)") % dh
2192 2161
2193 2162 self.ui.status(_("added %d changesets"
2194 2163 " with %d changes to %d files%s\n")
2195 2164 % (changesets, revisions, files, htext))
2196 2165 self.invalidatevolatilesets()
2197 2166
2198 2167 if changesets > 0:
2199 2168 p = lambda: cl.writepending() and self.root or ""
2200 2169 self.hook('pretxnchangegroup', throw=True,
2201 2170 node=hex(cl.node(clstart)), source=srctype,
2202 2171 url=url, pending=p)
2203 2172
2204 2173 added = [cl.node(r) for r in xrange(clstart, clend)]
2205 2174 publishing = self.ui.configbool('phases', 'publish', True)
2206 2175 if srctype == 'push':
2207 2176 # Old servers can not push the boundary themselves.
2208 2177 # New servers won't push the boundary if the changeset
2209 2178 # already existed locally as secret
2210 2179 #
2211 2180 # We should not use added here but the list of all changes in
2212 2181 # the bundle
2213 2182 if publishing:
2214 2183 phases.advanceboundary(self, phases.public, srccontent)
2215 2184 else:
2216 2185 phases.advanceboundary(self, phases.draft, srccontent)
2217 2186 phases.retractboundary(self, phases.draft, added)
2218 2187 elif srctype != 'strip':
2219 2188 # publishing only alters behavior during push
2220 2189 #
2221 2190 # strip should not touch boundary at all
2222 2191 phases.retractboundary(self, phases.draft, added)
2223 2192
2224 2193 # make changelog see real files again
2225 2194 cl.finalize(trp)
2226 2195
2227 2196 tr.close()
2228 2197
2229 2198 if changesets > 0:
2230 2199 if srctype != 'strip':
2231 2200 # During strip, the branchcache is invalid, but the coming
2232 2201 # call to `destroyed` will repair it.
2233 2202 # In the other cases we can safely update the cache on disk.
2234 2203 branchmap.updatecache(self.filtered('served'))
2235 2204 def runhooks():
2236 2205 # forcefully update the on-disk branch cache
2237 2206 self.ui.debug("updating the branch cache\n")
2238 2207 self.hook("changegroup", node=hex(cl.node(clstart)),
2239 2208 source=srctype, url=url)
2240 2209
2241 2210 for n in added:
2242 2211 self.hook("incoming", node=hex(n), source=srctype,
2243 2212 url=url)
2244 2213
2245 2214 newheads = [h for h in self.heads() if h not in oldheads]
2246 2215 self.ui.log("incoming",
2247 2216 "%s incoming changes - new heads: %s\n",
2248 2217 len(added),
2249 2218 ', '.join([hex(c[:6]) for c in newheads]))
2250 2219 self._afterlock(runhooks)
2251 2220
2252 2221 finally:
2253 2222 tr.release()
2254 2223 # never return 0 here:
2255 2224 if dh < 0:
2256 2225 return dh - 1
2257 2226 else:
2258 2227 return dh + 1
2259 2228
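The value returned by addchangegroup() comes from the `dh` computation just above: newly added heads that close their branch do not count toward the delta, and 0 stays reserved for "nothing changed". A condensed sketch of that arithmetic (names are illustrative):

    def headcount_result(oldheads, newheads, closesbranch):
        # head delta, ignoring new heads that close their branch
        dh = len(newheads) - len(oldheads)
        for h in newheads:
            if h not in oldheads and closesbranch(h):
                dh -= 1
        # never return 0 here: callers use 0 for "no source"
        return dh - 1 if dh < 0 else dh + 1
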
2229 def addchangegroupfiles(self, source, revmap, trp, pr, needfiles):
2230 revisions = 0
2231 files = 0
2232 while True:
2233 chunkdata = source.filelogheader()
2234 if not chunkdata:
2235 break
2236 f = chunkdata["filename"]
2237 self.ui.debug("adding %s revisions\n" % f)
2238 pr()
2239 fl = self.file(f)
2240 o = len(fl)
2241 if not fl.addgroup(source, revmap, trp):
2242 raise util.Abort(_("received file revlog group is empty"))
2243 revisions += len(fl) - o
2244 files += 1
2245 if f in needfiles:
2246 needs = needfiles[f]
2247 for new in xrange(o, len(fl)):
2248 n = fl.node(new)
2249 if n in needs:
2250 needs.remove(n)
2251 else:
2252 raise util.Abort(
2253 _("received spurious file revlog entry"))
2254 if not needs:
2255 del needfiles[f]
2256 self.ui.progress(_('files'), None)
2257
2258 for f, needs in needfiles.iteritems():
2259 fl = self.file(f)
2260 for n in needs:
2261 try:
2262 fl.rev(n)
2263 except error.LookupError:
2264 raise util.Abort(
2265 _('missing file data for %s:%s - run hg verify') %
2266 (f, hex(n)))
2267
2268 return revisions, files
2269
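The needfiles bookkeeping in the new addchangegroupfiles() is a cross-check in both directions: every filenode a validated manifest demands must arrive, and nothing unexpected may. A stripped-down model of that accounting over plain dicts (a hypothetical helper, not the actual revlog-driven code, which also tolerates pre-existing nodes via fl.rev()):

    def check_received_filenodes(needfiles, received):
        # needfiles: {filename: set(nodes required by the changesets)}
        # received:  {filename: list(nodes actually delivered)}
        for f, nodes in received.items():
            needs = needfiles.get(f)
            if needs is None:
                continue
            for n in nodes:
                if n in needs:
                    needs.remove(n)
                else:
                    raise ValueError('spurious file revlog entry: %s' % f)
            if not needs:
                del needfiles[f]
        if needfiles:
            # promised by a manifest but never delivered
            raise ValueError('missing file data for %s'
                             % ', '.join(sorted(needfiles)))
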
2260 2270 def stream_in(self, remote, requirements):
2261 2271 lock = self.lock()
2262 2272 try:
2263 2273 # Save remote branchmap. We will use it later
2264 2274 # to speed up branchcache creation
2265 2275 rbranchmap = None
2266 2276 if remote.capable("branchmap"):
2267 2277 rbranchmap = remote.branchmap()
2268 2278
2269 2279 fp = remote.stream_out()
2270 2280 l = fp.readline()
2271 2281 try:
2272 2282 resp = int(l)
2273 2283 except ValueError:
2274 2284 raise error.ResponseError(
2275 2285 _('unexpected response from remote server:'), l)
2276 2286 if resp == 1:
2277 2287 raise util.Abort(_('operation forbidden by server'))
2278 2288 elif resp == 2:
2279 2289 raise util.Abort(_('locking the remote repository failed'))
2280 2290 elif resp != 0:
2281 2291 raise util.Abort(_('the server sent an unknown error code'))
2282 2292 self.ui.status(_('streaming all changes\n'))
2283 2293 l = fp.readline()
2284 2294 try:
2285 2295 total_files, total_bytes = map(int, l.split(' ', 1))
2286 2296 except (ValueError, TypeError):
2287 2297 raise error.ResponseError(
2288 2298 _('unexpected response from remote server:'), l)
2289 2299 self.ui.status(_('%d files to transfer, %s of data\n') %
2290 2300 (total_files, util.bytecount(total_bytes)))
2291 2301 handled_bytes = 0
2292 2302 self.ui.progress(_('clone'), 0, total=total_bytes)
2293 2303 start = time.time()
2294 2304 for i in xrange(total_files):
2295 2305 # XXX doesn't support '\n' or '\r' in filenames
2296 2306 l = fp.readline()
2297 2307 try:
2298 2308 name, size = l.split('\0', 1)
2299 2309 size = int(size)
2300 2310 except (ValueError, TypeError):
2301 2311 raise error.ResponseError(
2302 2312 _('unexpected response from remote server:'), l)
2303 2313 if self.ui.debugflag:
2304 2314 self.ui.debug('adding %s (%s)\n' %
2305 2315 (name, util.bytecount(size)))
2306 2316 # for backwards compat, name was partially encoded
2307 2317 ofp = self.sopener(store.decodedir(name), 'w')
2308 2318 for chunk in util.filechunkiter(fp, limit=size):
2309 2319 handled_bytes += len(chunk)
2310 2320 self.ui.progress(_('clone'), handled_bytes,
2311 2321 total=total_bytes)
2312 2322 ofp.write(chunk)
2313 2323 ofp.close()
2314 2324 elapsed = time.time() - start
2315 2325 if elapsed <= 0:
2316 2326 elapsed = 0.001
2317 2327 self.ui.progress(_('clone'), None)
2318 2328 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2319 2329 (util.bytecount(total_bytes), elapsed,
2320 2330 util.bytecount(total_bytes / elapsed)))
2321 2331
2322 2332 # new requirements = old non-format requirements +
2323 2333 # new format-related requirements from the
2324 2334 # streamed-in repository
2325 2335 requirements.update(set(self.requirements) - self.supportedformats)
2326 2336 self._applyrequirements(requirements)
2327 2337 self._writerequirements()
2328 2338
2329 2339 if rbranchmap:
2330 2340 rbheads = []
2331 2341 for bheads in rbranchmap.itervalues():
2332 2342 rbheads.extend(bheads)
2333 2343
2334 2344 if rbheads:
2335 2345 rtiprev = max((int(self.changelog.rev(node))
2336 2346 for node in rbheads))
2337 2347 cache = branchmap.branchcache(rbranchmap,
2338 2348 self[rtiprev].node(),
2339 2349 rtiprev)
2340 2350 # Try to stick it as low as possible;
2341 2351 # filters above 'served' are unlikely to be fetched by a clone
2342 2352 for candidate in ('base', 'immutable', 'served'):
2343 2353 rview = self.filtered(candidate)
2344 2354 if cache.validfor(rview):
2345 2355 self._branchcaches[candidate] = cache
2346 2356 cache.write(rview)
2347 2357 break
2348 2358 self.invalidate()
2349 2359 return len(self.heads()) + 1
2350 2360 finally:
2351 2361 lock.release()
2352 2362
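stream_in() consumes a simple line-oriented framing: a numeric status line (0 ok, 1 forbidden, 2 remote locking failed), then '<file count> <byte count>', then for each file a '<name>\0<size>' line followed by exactly <size> raw bytes. A minimal parser sketch of that wire format (error handling trimmed; names are illustrative):

    def read_stream_clone(fp):
        resp = int(fp.readline())
        if resp != 0:  # 1: operation forbidden, 2: remote lock failed
            raise IOError('stream clone refused (code %d)' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        for _i in range(total_files):
            # filenames may not contain '\n' or '\r' (see the XXX above)
            name, size = fp.readline().split('\0', 1)
            yield name, fp.read(int(size))
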
2353 2363 def clone(self, remote, heads=[], stream=False):
2354 2364 '''clone remote repository.
2355 2365
2356 2366 keyword arguments:
2357 2367 heads: list of revs to clone (forces use of pull)
2358 2368 stream: use streaming clone if possible'''
2359 2369
2360 2370 # now, all clients that can request uncompressed clones can
2361 2371 # read repo formats supported by all servers that can serve
2362 2372 # them.
2363 2373
2364 2374 # if the revlog format changes, the client will have to check
2365 2375 # version and format flags on the "stream" capability, and use
2366 2376 # uncompressed only if compatible.
2367 2377
2368 2378 if not stream:
2369 2379 # if the server explicitly prefers to stream (for fast LANs)
2370 2380 stream = remote.capable('stream-preferred')
2371 2381
2372 2382 if stream and not heads:
2373 2383 # 'stream' means remote revlog format is revlogv1 only
2374 2384 if remote.capable('stream'):
2375 2385 return self.stream_in(remote, set(('revlogv1',)))
2376 2386 # otherwise, 'streamreqs' contains the remote revlog format
2377 2387 streamreqs = remote.capable('streamreqs')
2378 2388 if streamreqs:
2379 2389 streamreqs = set(streamreqs.split(','))
2380 2390 # if we support it, stream in and adjust our requirements
2381 2391 if not streamreqs - self.supportedformats:
2382 2392 return self.stream_in(remote, streamreqs)
2383 2393 return self.pull(remote, heads)
2384 2394
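clone()'s stream negotiation is a three-step fallback: honor the server's 'stream-preferred' hint, use plain 'stream' when the remote is revlogv1-only, otherwise accept 'streamreqs' only if every advertised requirement is supported locally, and finally fall back to a normal pull. A compact sketch with capabilities modeled as a dict (a simplification of the capable() API, which returns the value after '=' for 'streamreqs'):

    def choose_clone_mode(caps, supportedformats, stream, heads):
        # caps example: {'stream-preferred': True,
        #                'streamreqs': 'revlogv1,generaldelta'}
        if not stream:
            stream = caps.get('stream-preferred', False)
        if stream and not heads:
            if caps.get('stream'):
                return 'stream', set(['revlogv1'])
            streamreqs = caps.get('streamreqs')
            if streamreqs:
                reqs = set(streamreqs.split(','))
                # stream only if we support every remote requirement
                if not reqs - supportedformats:
                    return 'stream', reqs
        return 'pull', None
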
2385 2395 def pushkey(self, namespace, key, old, new):
2386 2396 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2387 2397 old=old, new=new)
2388 2398 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2389 2399 ret = pushkey.push(self, namespace, key, old, new)
2390 2400 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2391 2401 ret=ret)
2392 2402 return ret
2393 2403
2394 2404 def listkeys(self, namespace):
2395 2405 self.hook('prelistkeys', throw=True, namespace=namespace)
2396 2406 self.ui.debug('listing keys for "%s"\n' % namespace)
2397 2407 values = pushkey.list(self, namespace)
2398 2408 self.hook('listkeys', namespace=namespace, values=values)
2399 2409 return values
2400 2410
2401 2411 def debugwireargs(self, one, two, three=None, four=None, five=None):
2402 2412 '''used to test argument passing over the wire'''
2403 2413 return "%s %s %s %s %s" % (one, two, three, four, five)
2404 2414
2405 2415 def savecommitmessage(self, text):
2406 2416 fp = self.opener('last-message.txt', 'wb')
2407 2417 try:
2408 2418 fp.write(text)
2409 2419 finally:
2410 2420 fp.close()
2411 2421 return self.pathto(fp.name[len(self.root) + 1:])
2412 2422
2413 2423 # used to avoid circular references so destructors work
2414 2424 def aftertrans(files):
2415 2425 renamefiles = [tuple(t) for t in files]
2416 2426 def a():
2417 2427 for vfs, src, dest in renamefiles:
2418 2428 try:
2419 2429 vfs.rename(src, dest)
2420 2430 except OSError: # journal file does not yet exist
2421 2431 pass
2422 2432 return a
2423 2433
2424 2434 def undoname(fn):
2425 2435 base, name = os.path.split(fn)
2426 2436 assert name.startswith('journal')
2427 2437 return os.path.join(base, name.replace('journal', 'undo', 1))
2428 2438
2429 2439 def instance(ui, path, create):
2430 2440 return localrepository(ui, util.urllocalpath(path), create)
2431 2441
2432 2442 def islocal(path):
2433 2443 return True