localrepo: use "vfs" instead of "opener" while ensuring repository directory...
FUJIWARA Katsunori
r17160:22b9b1d2 default
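This changeset is one step in Mercurial's gradual rename of the "opener" attributes to "vfs" (virtual file system): self.vfs and self.wvfs are bound to the very same objects as self.opener and self.wopener, so the two names are interchangeable during the transition and call sites can be switched over one by one. A minimal sketch of the aliasing pattern, using a simplified hypothetical stand-in for scmutil.opener (illustration only, not the real class):

    import os

    class opener(object):
        # simplified stand-in for scmutil.opener: resolve paths
        # relative to a base directory and open files there
        def __init__(self, base):
            self.base = base
        def join(self, *parts):
            return os.path.join(self.base, *parts)
        def __call__(self, path, mode='r'):
            return open(self.join(path), mode)
        def append(self, path, data):
            fp = self(path, 'ab')
            try:
                fp.write(data)
            finally:
                fp.close()

    repo_opener = opener('/path/to/repo/.hg')  # hypothetical path
    repo_vfs = repo_opener                     # same object, new name
    # repo_vfs.append(...) and repo_opener.append(...) are equivalent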
@@ -1,2457 +1,2457 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import repo, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
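For context, a filecache-style decorator memoizes a property and recomputes it only when the backing file changes; storecache merely redirects the path lookup into the store via sjoin(). A rough sketch of the mechanism, assuming stat-based invalidation (the real scmutil.filecache is considerably more elaborate):

    import os

    class statfilecache(object):
        # toy version: cache func(obj) keyed on the file's (mtime, size)
        def __init__(self, path):
            self.path = path
        def join(self, obj, fname):
            return fname  # storecache would return obj.sjoin(fname)
        def __call__(self, func):
            name, cache = func.__name__, {}
            def getter(obj):
                fname = self.join(obj, self.path)
                try:
                    st = os.stat(fname)
                    key = (st.st_mtime, st.st_size)
                except OSError:
                    key = None
                if name not in cache or cache[name][0] != key:
                    cache[name] = (key, func(obj))  # recompute on change
                return cache[name][1]
            return property(getter)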
26 26 class localrepository(repo.repository):
27 27 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 28 'known', 'getbundle'))
29 29 supportedformats = set(('revlogv1', 'generaldelta'))
30 30 supported = supportedformats | set(('store', 'fncache', 'shared',
31 31 'dotencode'))
32 32 openerreqs = set(('revlogv1', 'generaldelta'))
33 33 requirements = ['revlogv1']
34 34
35 35 def _baserequirements(self, create):
36 36 return self.requirements[:]
37 37
38 38 def __init__(self, baseui, path=None, create=False):
39 39 repo.repository.__init__(self)
40 40 self.wopener = scmutil.opener(path, expand=True)
41 41 self.wvfs = self.wopener
42 42 self.root = self.wvfs.base
43 43 self.path = self.wvfs.join(".hg")
44 44 self.origroot = path
45 45 self.auditor = scmutil.pathauditor(self.root, self._checknested)
46 46 self.opener = scmutil.opener(self.path)
47 47 self.vfs = self.opener
48 48 self.baseui = baseui
49 49 self.ui = baseui.copy()
50 50 # A list of callbacks to shape the phase if no data were found.
51 51 # Callbacks are in the form: func(repo, roots) --> processed root.
52 52 # This list is to be filled by extensions during repo setup
53 53 self._phasedefaults = []
54 54
55 55 try:
56 56 self.ui.readconfig(self.join("hgrc"), self.root)
57 57 extensions.loadall(self.ui)
58 58 except IOError:
59 59 pass
60 60
61 61 if not os.path.isdir(self.path):
62 62 if create:
63 63 if not os.path.exists(self.root):
64 64 util.makedirs(self.root)
65 65 util.makedir(self.path, notindexed=True)
66 66 requirements = self._baserequirements(create)
67 67 if self.ui.configbool('format', 'usestore', True):
68 68 os.mkdir(os.path.join(self.path, "store"))
69 69 requirements.append("store")
70 70 if self.ui.configbool('format', 'usefncache', True):
71 71 requirements.append("fncache")
72 72 if self.ui.configbool('format', 'dotencode', True):
73 73 requirements.append('dotencode')
74 74 # create an invalid changelog
75 self.opener.append(
75 self.vfs.append(
76 76 "00changelog.i",
77 77 '\0\0\0\2' # represents revlogv2
78 78 ' dummy changelog to prevent using the old repo layout'
79 79 )
80 80 if self.ui.configbool('format', 'generaldelta', False):
81 81 requirements.append("generaldelta")
82 82 requirements = set(requirements)
83 83 else:
84 84 raise error.RepoError(_("repository %s not found") % path)
85 85 elif create:
86 86 raise error.RepoError(_("repository %s already exists") % path)
87 87 else:
88 88 try:
89 requirements = scmutil.readrequires(self.opener, self.supported)
89 requirements = scmutil.readrequires(self.vfs, self.supported)
90 90 except IOError, inst:
91 91 if inst.errno != errno.ENOENT:
92 92 raise
93 93 requirements = set()
94 94
95 95 self.sharedpath = self.path
96 96 try:
97 97 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
98 98 if not os.path.exists(s):
99 99 raise error.RepoError(
100 100 _('.hg/sharedpath points to nonexistent directory %s') % s)
101 101 self.sharedpath = s
102 102 except IOError, inst:
103 103 if inst.errno != errno.ENOENT:
104 104 raise
105 105
106 106 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
107 107 self.spath = self.store.path
108 108 self.sopener = self.store.opener
109 109 self.svfs = self.sopener
110 110 self.sjoin = self.store.join
111 111 self.opener.createmode = self.store.createmode
112 112 self._applyrequirements(requirements)
113 113 if create:
114 114 self._writerequirements()
115 115
116 116
117 117 self._branchcache = None
118 118 self._branchcachetip = None
119 119 self.filterpats = {}
120 120 self._datafilters = {}
121 121 self._transref = self._lockref = self._wlockref = None
122 122
123 123 # A cache for various files under .hg/ that tracks file changes,
124 124 # (used by the filecache decorator)
125 125 #
126 126 # Maps a property name to its util.filecacheentry
127 127 self._filecache = {}
128 128
129 129 def _applyrequirements(self, requirements):
130 130 self.requirements = requirements
131 131 self.sopener.options = dict((r, 1) for r in requirements
132 132 if r in self.openerreqs)
133 133
134 134 def _writerequirements(self):
135 135 reqfile = self.opener("requires", "w")
136 136 for r in self.requirements:
137 137 reqfile.write("%s\n" % r)
138 138 reqfile.close()
139 139
140 140 def _checknested(self, path):
141 141 """Determine if path is a legal nested repository."""
142 142 if not path.startswith(self.root):
143 143 return False
144 144 subpath = path[len(self.root) + 1:]
145 145 normsubpath = util.pconvert(subpath)
146 146
147 147 # XXX: Checking against the current working copy is wrong in
148 148 # the sense that it can reject things like
149 149 #
150 150 # $ hg cat -r 10 sub/x.txt
151 151 #
152 152 # if sub/ is no longer a subrepository in the working copy
153 153 # parent revision.
154 154 #
155 155 # However, it can of course also allow things that would have
156 156 # been rejected before, such as the above cat command if sub/
157 157 # is a subrepository now, but was a normal directory before.
158 158 # The old path auditor would have rejected by mistake since it
159 159 # panics when it sees sub/.hg/.
160 160 #
161 161 # All in all, checking against the working copy seems sensible
162 162 # since we want to prevent access to nested repositories on
163 163 # the filesystem *now*.
164 164 ctx = self[None]
165 165 parts = util.splitpath(subpath)
166 166 while parts:
167 167 prefix = '/'.join(parts)
168 168 if prefix in ctx.substate:
169 169 if prefix == normsubpath:
170 170 return True
171 171 else:
172 172 sub = ctx.sub(prefix)
173 173 return sub.checknested(subpath[len(prefix) + 1:])
174 174 else:
175 175 parts.pop()
176 176 return False
177 177
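The loop above walks the candidate path's prefixes from longest to shortest until it hits a registered subrepo, then delegates the remainder of the path to that subrepo. A self-contained model of the prefix walk, with hypothetical substate contents:

    def checknested_sketch(substate, subpath):
        # substate maps subrepo paths to state tuples, like ctx.substate
        parts = subpath.split('/')
        while parts:
            prefix = '/'.join(parts)
            if prefix in substate:
                # the real code recurses: sub.checknested(rest of path)
                return True
            parts.pop()
        return False

    assert checknested_sketch({'sub': ('source', 'rev', 'hg')}, 'sub/inner')
    assert not checknested_sketch({'sub': ('source', 'rev', 'hg')}, 'other')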
178 178 @filecache('bookmarks')
179 179 def _bookmarks(self):
180 180 return bookmarks.read(self)
181 181
182 182 @filecache('bookmarks.current')
183 183 def _bookmarkcurrent(self):
184 184 return bookmarks.readcurrent(self)
185 185
186 186 def _writebookmarks(self, marks):
187 187 bookmarks.write(self)
188 188
189 189 def bookmarkheads(self, bookmark):
190 190 name = bookmark.split('@', 1)[0]
191 191 heads = []
192 192 for mark, n in self._bookmarks.iteritems():
193 193 if mark.split('@', 1)[0] == name:
194 194 heads.append(n)
195 195 return heads
196 196
197 197 @storecache('phaseroots')
198 198 def _phasecache(self):
199 199 return phases.phasecache(self, self._phasedefaults)
200 200
201 201 @storecache('obsstore')
202 202 def obsstore(self):
203 203 store = obsolete.obsstore(self.sopener)
204 204 return store
205 205
206 206 @storecache('00changelog.i')
207 207 def changelog(self):
208 208 c = changelog.changelog(self.sopener)
209 209 if 'HG_PENDING' in os.environ:
210 210 p = os.environ['HG_PENDING']
211 211 if p.startswith(self.root):
212 212 c.readpending('00changelog.i.a')
213 213 return c
214 214
215 215 @storecache('00manifest.i')
216 216 def manifest(self):
217 217 return manifest.manifest(self.sopener)
218 218
219 219 @filecache('dirstate')
220 220 def dirstate(self):
221 221 warned = [0]
222 222 def validate(node):
223 223 try:
224 224 self.changelog.rev(node)
225 225 return node
226 226 except error.LookupError:
227 227 if not warned[0]:
228 228 warned[0] = True
229 229 self.ui.warn(_("warning: ignoring unknown"
230 230 " working parent %s!\n") % short(node))
231 231 return nullid
232 232
233 233 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
234 234
235 235 def __getitem__(self, changeid):
236 236 if changeid is None:
237 237 return context.workingctx(self)
238 238 return context.changectx(self, changeid)
239 239
240 240 def __contains__(self, changeid):
241 241 try:
242 242 return bool(self.lookup(changeid))
243 243 except error.RepoLookupError:
244 244 return False
245 245
246 246 def __nonzero__(self):
247 247 return True
248 248
249 249 def __len__(self):
250 250 return len(self.changelog)
251 251
252 252 def __iter__(self):
253 253 for i in xrange(len(self)):
254 254 yield i
255 255
256 256 def revs(self, expr, *args):
257 257 '''Return a list of revisions matching the given revset'''
258 258 expr = revset.formatspec(expr, *args)
259 259 m = revset.match(None, expr)
260 260 return [r for r in m(self, range(len(self)))]
261 261
262 262 def set(self, expr, *args):
263 263 '''
264 264 Yield a context for each matching revision, after doing arg
265 265 replacement via revset.formatspec
266 266 '''
267 267 for r in self.revs(expr, *args):
268 268 yield self[r]
269 269
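Both helpers take a revset template whose %-placeholders are escaped by revset.formatspec before parsing. Illustrative use against an open repository object (hypothetical; 'repo' stands for a localrepository instance):

    # revision numbers as integers
    revnums = repo.revs('ancestors(%d) and not %d', 10, 3)

    # the same kind of query, yielding changectx objects instead
    for ctx in repo.set('heads(branch(%s))', 'default'):
        node = ctx.hex()  # work with each matching changeset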
270 270 def url(self):
271 271 return 'file:' + self.root
272 272
273 273 def hook(self, name, throw=False, **args):
274 274 return hook.hook(self.ui, self, name, throw, **args)
275 275
276 276 tag_disallowed = ':\r\n'
277 277
278 278 def _tag(self, names, node, message, local, user, date, extra={}):
279 279 if isinstance(names, str):
280 280 allchars = names
281 281 names = (names,)
282 282 else:
283 283 allchars = ''.join(names)
284 284 for c in self.tag_disallowed:
285 285 if c in allchars:
286 286 raise util.Abort(_('%r cannot be used in a tag name') % c)
287 287
288 288 branches = self.branchmap()
289 289 for name in names:
290 290 self.hook('pretag', throw=True, node=hex(node), tag=name,
291 291 local=local)
292 292 if name in branches:
293 293 self.ui.warn(_("warning: tag %s conflicts with existing"
294 294 " branch name\n") % name)
295 295
296 296 def writetags(fp, names, munge, prevtags):
297 297 fp.seek(0, 2)
298 298 if prevtags and prevtags[-1] != '\n':
299 299 fp.write('\n')
300 300 for name in names:
301 301 m = munge and munge(name) or name
302 302 if (self._tagscache.tagtypes and
303 303 name in self._tagscache.tagtypes):
304 304 old = self.tags().get(name, nullid)
305 305 fp.write('%s %s\n' % (hex(old), m))
306 306 fp.write('%s %s\n' % (hex(node), m))
307 307 fp.close()
308 308
309 309 prevtags = ''
310 310 if local:
311 311 try:
312 312 fp = self.opener('localtags', 'r+')
313 313 except IOError:
314 314 fp = self.opener('localtags', 'a')
315 315 else:
316 316 prevtags = fp.read()
317 317
318 318 # local tags are stored in the current charset
319 319 writetags(fp, names, None, prevtags)
320 320 for name in names:
321 321 self.hook('tag', node=hex(node), tag=name, local=local)
322 322 return
323 323
324 324 try:
325 325 fp = self.wfile('.hgtags', 'rb+')
326 326 except IOError, e:
327 327 if e.errno != errno.ENOENT:
328 328 raise
329 329 fp = self.wfile('.hgtags', 'ab')
330 330 else:
331 331 prevtags = fp.read()
332 332
333 333 # committed tags are stored in UTF-8
334 334 writetags(fp, names, encoding.fromlocal, prevtags)
335 335
336 336 fp.close()
337 337
338 338 self.invalidatecaches()
339 339
340 340 if '.hgtags' not in self.dirstate:
341 341 self[None].add(['.hgtags'])
342 342
343 343 m = matchmod.exact(self.root, '', ['.hgtags'])
344 344 tagnode = self.commit(message, user, date, extra=extra, match=m)
345 345
346 346 for name in names:
347 347 self.hook('tag', node=hex(node), tag=name, local=local)
348 348
349 349 return tagnode
350 350
351 351 def tag(self, names, node, message, local, user, date):
352 352 '''tag a revision with one or more symbolic names.
353 353
354 354 names is a list of strings or, when adding a single tag, names may be a
355 355 string.
356 356
357 357 if local is True, the tags are stored in a per-repository file.
358 358 otherwise, they are stored in the .hgtags file, and a new
359 359 changeset is committed with the change.
360 360
361 361 keyword arguments:
362 362
363 363 local: whether to store tags in non-version-controlled file
364 364 (default False)
365 365
366 366 message: commit message to use if committing
367 367
368 368 user: name of user to use if committing
369 369
370 370 date: date tuple to use if committing'''
371 371
372 372 if not local:
373 373 for x in self.status()[:5]:
374 374 if '.hgtags' in x:
375 375 raise util.Abort(_('working copy of .hgtags is changed '
376 376 '(please commit .hgtags manually)'))
377 377
378 378 self.tags() # instantiate the cache
379 379 self._tag(names, node, message, local, user, date)
380 380
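An illustrative call matching the signature above (hypothetical values; tags the current tip globally, which commits the .hgtags change):

    repo.tag(['v1.0'], repo.changelog.tip(),
             'Added tag v1.0', False, None, None)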
381 381 @propertycache
382 382 def _tagscache(self):
383 383 '''Returns a tagscache object that contains various tags related
384 384 caches.'''
385 385
386 386 # This simplifies its cache management by having one decorated
387 387 # function (this one) and the rest simply fetch things from it.
388 388 class tagscache(object):
389 389 def __init__(self):
390 390 # These two define the set of tags for this repository. tags
391 391 # maps tag name to node; tagtypes maps tag name to 'global' or
392 392 # 'local'. (Global tags are defined by .hgtags across all
393 393 # heads, and local tags are defined in .hg/localtags.)
394 394 # They constitute the in-memory cache of tags.
395 395 self.tags = self.tagtypes = None
396 396
397 397 self.nodetagscache = self.tagslist = None
398 398
399 399 cache = tagscache()
400 400 cache.tags, cache.tagtypes = self._findtags()
401 401
402 402 return cache
403 403
404 404 def tags(self):
405 405 '''return a mapping of tag to node'''
406 406 t = {}
407 407 for k, v in self._tagscache.tags.iteritems():
408 408 try:
409 409 # ignore tags to unknown nodes
410 410 self.changelog.rev(v)
411 411 t[k] = v
412 412 except (error.LookupError, ValueError):
413 413 pass
414 414 return t
415 415
416 416 def _findtags(self):
417 417 '''Do the hard work of finding tags. Return a pair of dicts
418 418 (tags, tagtypes) where tags maps tag name to node, and tagtypes
419 419 maps tag name to a string like \'global\' or \'local\'.
420 420 Subclasses or extensions are free to add their own tags, but
421 421 should be aware that the returned dicts will be retained for the
422 422 duration of the localrepo object.'''
423 423
424 424 # XXX what tagtype should subclasses/extensions use? Currently
425 425 # mq and bookmarks add tags, but do not set the tagtype at all.
426 426 # Should each extension invent its own tag type? Should there
427 427 # be one tagtype for all such "virtual" tags? Or is the status
428 428 # quo fine?
429 429
430 430 alltags = {} # map tag name to (node, hist)
431 431 tagtypes = {}
432 432
433 433 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
434 434 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
435 435
436 436 # Build the return dicts. Have to re-encode tag names because
437 437 # the tags module always uses UTF-8 (in order not to lose info
438 438 # writing to the cache), but the rest of Mercurial wants them in
439 439 # local encoding.
440 440 tags = {}
441 441 for (name, (node, hist)) in alltags.iteritems():
442 442 if node != nullid:
443 443 tags[encoding.tolocal(name)] = node
444 444 tags['tip'] = self.changelog.tip()
445 445 tagtypes = dict([(encoding.tolocal(name), value)
446 446 for (name, value) in tagtypes.iteritems()])
447 447 return (tags, tagtypes)
448 448
449 449 def tagtype(self, tagname):
450 450 '''
451 451 return the type of the given tag. result can be:
452 452
453 453 'local' : a local tag
454 454 'global' : a global tag
455 455 None : tag does not exist
456 456 '''
457 457
458 458 return self._tagscache.tagtypes.get(tagname)
459 459
460 460 def tagslist(self):
461 461 '''return a list of tags ordered by revision'''
462 462 if not self._tagscache.tagslist:
463 463 l = []
464 464 for t, n in self.tags().iteritems():
465 465 r = self.changelog.rev(n)
466 466 l.append((r, t, n))
467 467 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
468 468
469 469 return self._tagscache.tagslist
470 470
471 471 def nodetags(self, node):
472 472 '''return the tags associated with a node'''
473 473 if not self._tagscache.nodetagscache:
474 474 nodetagscache = {}
475 475 for t, n in self._tagscache.tags.iteritems():
476 476 nodetagscache.setdefault(n, []).append(t)
477 477 for tags in nodetagscache.itervalues():
478 478 tags.sort()
479 479 self._tagscache.nodetagscache = nodetagscache
480 480 return self._tagscache.nodetagscache.get(node, [])
481 481
482 482 def nodebookmarks(self, node):
483 483 marks = []
484 484 for bookmark, n in self._bookmarks.iteritems():
485 485 if n == node:
486 486 marks.append(bookmark)
487 487 return sorted(marks)
488 488
489 489 def _branchtags(self, partial, lrev):
490 490 # TODO: rename this function?
491 491 tiprev = len(self) - 1
492 492 if lrev != tiprev:
493 493 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
494 494 self._updatebranchcache(partial, ctxgen)
495 495 self._writebranchcache(partial, self.changelog.tip(), tiprev)
496 496
497 497 return partial
498 498
499 499 def updatebranchcache(self):
500 500 tip = self.changelog.tip()
501 501 if self._branchcache is not None and self._branchcachetip == tip:
502 502 return
503 503
504 504 oldtip = self._branchcachetip
505 505 self._branchcachetip = tip
506 506 if oldtip is None or oldtip not in self.changelog.nodemap:
507 507 partial, last, lrev = self._readbranchcache()
508 508 else:
509 509 lrev = self.changelog.rev(oldtip)
510 510 partial = self._branchcache
511 511
512 512 self._branchtags(partial, lrev)
513 513 # this private cache holds all heads (not just the branch tips)
514 514 self._branchcache = partial
515 515
516 516 def branchmap(self):
517 517 '''returns a dictionary {branch: [branchheads]}'''
518 518 self.updatebranchcache()
519 519 return self._branchcache
520 520
521 521 def _branchtip(self, heads):
522 522 '''return the tipmost branch head in heads'''
523 523 tip = heads[-1]
524 524 for h in reversed(heads):
525 525 if not self[h].closesbranch():
526 526 tip = h
527 527 break
528 528 return tip
529 529
530 530 def branchtip(self, branch):
531 531 '''return the tip node for a given branch'''
532 532 if branch not in self.branchmap():
533 533 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
534 534 return self._branchtip(self.branchmap()[branch])
535 535
536 536 def branchtags(self):
537 537 '''return a dict where branch names map to the tipmost head of
538 538 the branch, open heads come before closed'''
539 539 bt = {}
540 540 for bn, heads in self.branchmap().iteritems():
541 541 bt[bn] = self._branchtip(heads)
542 542 return bt
543 543
544 544 def _readbranchcache(self):
545 545 partial = {}
546 546 try:
547 547 f = self.opener("cache/branchheads")
548 548 lines = f.read().split('\n')
549 549 f.close()
550 550 except (IOError, OSError):
551 551 return {}, nullid, nullrev
552 552
553 553 try:
554 554 last, lrev = lines.pop(0).split(" ", 1)
555 555 last, lrev = bin(last), int(lrev)
556 556 if lrev >= len(self) or self[lrev].node() != last:
557 557 # invalidate the cache
558 558 raise ValueError('invalidating branch cache (tip differs)')
559 559 for l in lines:
560 560 if not l:
561 561 continue
562 562 node, label = l.split(" ", 1)
563 563 label = encoding.tolocal(label.strip())
564 564 if not node in self:
565 565 raise ValueError('invalidating branch cache because node '+
566 566 '%s does not exist' % node)
567 567 partial.setdefault(label, []).append(bin(node))
568 568 except KeyboardInterrupt:
569 569 raise
570 570 except Exception, inst:
571 571 if self.ui.debugflag:
572 572 self.ui.warn(str(inst), '\n')
573 573 partial, last, lrev = {}, nullid, nullrev
574 574 return partial, last, lrev
575 575
576 576 def _writebranchcache(self, branches, tip, tiprev):
577 577 try:
578 578 f = self.opener("cache/branchheads", "w", atomictemp=True)
579 579 f.write("%s %s\n" % (hex(tip), tiprev))
580 580 for label, nodes in branches.iteritems():
581 581 for node in nodes:
582 582 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
583 583 f.close()
584 584 except (IOError, OSError):
585 585 pass
586 586
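As _readbranchcache and _writebranchcache show, .hg/cache/branchheads is a flat text file: one header line with the tip node and tip rev, then one line per head carrying a node and its branch label. Illustrative contents (hypothetical hashes):

    b47ab6e8b7a4... 42
    9f3bc0f4e40a... default
    1c28f494dae6... stable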
587 587 def _updatebranchcache(self, partial, ctxgen):
588 588 """Given a branchhead cache, partial, that may have extra nodes or be
589 589 missing heads, and a generator of nodes that are at least a superset of
590 590 the missing heads, this function updates partial to be correct.
591 591 """
592 592 # collect new branch entries
593 593 newbranches = {}
594 594 for c in ctxgen:
595 595 newbranches.setdefault(c.branch(), []).append(c.node())
596 596 # if older branchheads are reachable from new ones, they aren't
597 597 # really branchheads. Note checking parents is insufficient:
598 598 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
599 599 for branch, newnodes in newbranches.iteritems():
600 600 bheads = partial.setdefault(branch, [])
601 601 # Remove candidate heads that no longer are in the repo (e.g., as
602 602 # the result of a strip that just happened). Avoid using 'node in
603 603 # self' here because that dives down into branchcache code somewhat
604 604 # recursively.
605 605 bheadrevs = [self.changelog.rev(node) for node in bheads
606 606 if self.changelog.hasnode(node)]
607 607 newheadrevs = [self.changelog.rev(node) for node in newnodes
608 608 if self.changelog.hasnode(node)]
609 609 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
610 610 # Remove duplicates - nodes that are in newheadrevs and are already
611 611 # in bheadrevs. This can happen if you strip a node whose parent
612 612 # was already a head (because they're on different branches).
613 613 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
614 614
615 615 # Starting from tip means fewer passes over reachable. If we know
616 616 # the new candidates are not ancestors of existing heads, we don't
617 617 # have to examine ancestors of existing heads
618 618 if ctxisnew:
619 619 iterrevs = sorted(newheadrevs)
620 620 else:
621 621 iterrevs = list(bheadrevs)
622 622
623 623 # This loop prunes out two kinds of heads - heads that are
624 624 # superseded by a head in newheadrevs, and newheadrevs that are not
625 625 # heads because an existing head is their descendant.
626 626 while iterrevs:
627 627 latest = iterrevs.pop()
628 628 if latest not in bheadrevs:
629 629 continue
630 630 ancestors = set(self.changelog.ancestors([latest],
631 631 bheadrevs[0]))
632 632 if ancestors:
633 633 bheadrevs = [b for b in bheadrevs if b not in ancestors]
634 634 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
635 635
636 636 # There may be branches that cease to exist when the last commit in the
637 637 # branch was stripped. This code filters them out. Note that the
638 638 # branch that ceased to exist may not be in newbranches because
639 639 # newbranches is the set of candidate heads, which when you strip the
640 640 # last commit in a branch will be the parent branch.
641 641 for branch in partial:
642 642 nodes = [head for head in partial[branch]
643 643 if self.changelog.hasnode(head)]
644 644 if not nodes:
645 645 del partial[branch]
646 646
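A compact model of the pruning loop above, with a hypothetical ancestorsof() helper standing in for changelog.ancestors():

    def prunedheads(bheadrevs, newheadrevs, ancestorsof):
        # merge old and new candidates, then drop any rev that is an
        # ancestor of a later candidate, mirroring the while loop above
        bheadrevs = sorted(set(bheadrevs) | set(newheadrevs))
        iterrevs = list(bheadrevs)
        while iterrevs:
            latest = iterrevs.pop()
            if latest not in bheadrevs:
                continue
            ancestors = ancestorsof(latest)
            bheadrevs = [b for b in bheadrevs if b not in ancestors]
        return bheadrevs

    # linear history 3 -> 5 -> 7: only the descendant 7 survives
    assert prunedheads([3, 5], [7], lambda r: set(range(r))) == [7]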
647 647 def lookup(self, key):
648 648 return self[key].node()
649 649
650 650 def lookupbranch(self, key, remote=None):
651 651 repo = remote or self
652 652 if key in repo.branchmap():
653 653 return key
654 654
655 655 repo = (remote and remote.local()) and remote or self
656 656 return repo[key].branch()
657 657
658 658 def known(self, nodes):
659 659 nm = self.changelog.nodemap
660 660 pc = self._phasecache
661 661 result = []
662 662 for n in nodes:
663 663 r = nm.get(n)
664 664 resp = not (r is None or pc.phase(self, r) >= phases.secret)
665 665 result.append(resp)
666 666 return result
667 667
668 668 def local(self):
669 669 return self
670 670
671 671 def join(self, f):
672 672 return os.path.join(self.path, f)
673 673
674 674 def wjoin(self, f):
675 675 return os.path.join(self.root, f)
676 676
677 677 def file(self, f):
678 678 if f[0] == '/':
679 679 f = f[1:]
680 680 return filelog.filelog(self.sopener, f)
681 681
682 682 def changectx(self, changeid):
683 683 return self[changeid]
684 684
685 685 def parents(self, changeid=None):
686 686 '''get list of changectxs for parents of changeid'''
687 687 return self[changeid].parents()
688 688
689 689 def setparents(self, p1, p2=nullid):
690 690 copies = self.dirstate.setparents(p1, p2)
691 691 if copies:
692 692 # Adjust copy records, the dirstate cannot do it, it
693 693 # requires access to parents manifests. Preserve them
694 694 # only for entries added to first parent.
695 695 pctx = self[p1]
696 696 for f in copies:
697 697 if f not in pctx and copies[f] in pctx:
698 698 self.dirstate.copy(copies[f], f)
699 699
700 700 def filectx(self, path, changeid=None, fileid=None):
701 701 """changeid can be a changeset revision, node, or tag.
702 702 fileid can be a file revision or node."""
703 703 return context.filectx(self, path, changeid, fileid)
704 704
705 705 def getcwd(self):
706 706 return self.dirstate.getcwd()
707 707
708 708 def pathto(self, f, cwd=None):
709 709 return self.dirstate.pathto(f, cwd)
710 710
711 711 def wfile(self, f, mode='r'):
712 712 return self.wopener(f, mode)
713 713
714 714 def _link(self, f):
715 715 return os.path.islink(self.wjoin(f))
716 716
717 717 def _loadfilter(self, filter):
718 718 if filter not in self.filterpats:
719 719 l = []
720 720 for pat, cmd in self.ui.configitems(filter):
721 721 if cmd == '!':
722 722 continue
723 723 mf = matchmod.match(self.root, '', [pat])
724 724 fn = None
725 725 params = cmd
726 726 for name, filterfn in self._datafilters.iteritems():
727 727 if cmd.startswith(name):
728 728 fn = filterfn
729 729 params = cmd[len(name):].lstrip()
730 730 break
731 731 if not fn:
732 732 fn = lambda s, c, **kwargs: util.filter(s, c)
733 733 # Wrap old filters not supporting keyword arguments
734 734 if not inspect.getargspec(fn)[2]:
735 735 oldfn = fn
736 736 fn = lambda s, c, **kwargs: oldfn(s, c)
737 737 l.append((mf, fn, params))
738 738 self.filterpats[filter] = l
739 739 return self.filterpats[filter]
740 740
741 741 def _filter(self, filterpats, filename, data):
742 742 for mf, fn, cmd in filterpats:
743 743 if mf(filename):
744 744 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
745 745 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
746 746 break
747 747
748 748 return data
749 749
750 750 @propertycache
751 751 def _encodefilterpats(self):
752 752 return self._loadfilter('encode')
753 753
754 754 @propertycache
755 755 def _decodefilterpats(self):
756 756 return self._loadfilter('decode')
757 757
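The encode/decode filters loaded above come from hgrc sections of the same names; wread() applies the [encode] filters (working directory to store) and wwrite() the [decode] filters (store to working directory). The gzip example from the hgrc documentation:

    [encode]
    # decompress gzip files on checkin so deltas compress well
    *.gz = pipe: gunzip

    [decode]
    # recompress when writing back to the working directory
    *.gz = gzip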
758 758 def adddatafilter(self, name, filter):
759 759 self._datafilters[name] = filter
760 760
761 761 def wread(self, filename):
762 762 if self._link(filename):
763 763 data = os.readlink(self.wjoin(filename))
764 764 else:
765 765 data = self.wopener.read(filename)
766 766 return self._filter(self._encodefilterpats, filename, data)
767 767
768 768 def wwrite(self, filename, data, flags):
769 769 data = self._filter(self._decodefilterpats, filename, data)
770 770 if 'l' in flags:
771 771 self.wopener.symlink(data, filename)
772 772 else:
773 773 self.wopener.write(filename, data)
774 774 if 'x' in flags:
775 775 util.setflags(self.wjoin(filename), False, True)
776 776
777 777 def wwritedata(self, filename, data):
778 778 return self._filter(self._decodefilterpats, filename, data)
779 779
780 780 def transaction(self, desc):
781 781 tr = self._transref and self._transref() or None
782 782 if tr and tr.running():
783 783 return tr.nest()
784 784
785 785 # abort here if the journal already exists
786 786 if os.path.exists(self.sjoin("journal")):
787 787 raise error.RepoError(
788 788 _("abandoned transaction found - run hg recover"))
789 789
790 790 self._writejournal(desc)
791 791 renames = [(x, undoname(x)) for x in self._journalfiles()]
792 792
793 793 tr = transaction.transaction(self.ui.warn, self.sopener,
794 794 self.sjoin("journal"),
795 795 aftertrans(renames),
796 796 self.store.createmode)
797 797 self._transref = weakref.ref(tr)
798 798 return tr
799 799
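The rename list built above drives the journal-to-undo life cycle: while a transaction runs, state is mirrored into journal.* files; on successful close, the aftertrans() callback renames each journal file to its undo.* counterpart, which rollback() later consumes. A sketch of that callback under those assumptions, taking the (src, dest) pairs produced from _journalfiles() and undoname():

    import os

    def aftertrans_sketch(renames):
        # returns a callback to run once the transaction closes
        def a():
            for src, dest in renames:
                if os.path.exists(src):
                    os.rename(src, dest)  # journal.foo -> undo.foo
        return a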
800 800 def _journalfiles(self):
801 801 return (self.sjoin('journal'), self.join('journal.dirstate'),
802 802 self.join('journal.branch'), self.join('journal.desc'),
803 803 self.join('journal.bookmarks'),
804 804 self.sjoin('journal.phaseroots'))
805 805
806 806 def undofiles(self):
807 807 return [undoname(x) for x in self._journalfiles()]
808 808
809 809 def _writejournal(self, desc):
810 810 self.opener.write("journal.dirstate",
811 811 self.opener.tryread("dirstate"))
812 812 self.opener.write("journal.branch",
813 813 encoding.fromlocal(self.dirstate.branch()))
814 814 self.opener.write("journal.desc",
815 815 "%d\n%s\n" % (len(self), desc))
816 816 self.opener.write("journal.bookmarks",
817 817 self.opener.tryread("bookmarks"))
818 818 self.sopener.write("journal.phaseroots",
819 819 self.sopener.tryread("phaseroots"))
820 820
821 821 def recover(self):
822 822 lock = self.lock()
823 823 try:
824 824 if os.path.exists(self.sjoin("journal")):
825 825 self.ui.status(_("rolling back interrupted transaction\n"))
826 826 transaction.rollback(self.sopener, self.sjoin("journal"),
827 827 self.ui.warn)
828 828 self.invalidate()
829 829 return True
830 830 else:
831 831 self.ui.warn(_("no interrupted transaction available\n"))
832 832 return False
833 833 finally:
834 834 lock.release()
835 835
836 836 def rollback(self, dryrun=False, force=False):
837 837 wlock = lock = None
838 838 try:
839 839 wlock = self.wlock()
840 840 lock = self.lock()
841 841 if os.path.exists(self.sjoin("undo")):
842 842 return self._rollback(dryrun, force)
843 843 else:
844 844 self.ui.warn(_("no rollback information available\n"))
845 845 return 1
846 846 finally:
847 847 release(lock, wlock)
848 848
849 849 def _rollback(self, dryrun, force):
850 850 ui = self.ui
851 851 try:
852 852 args = self.opener.read('undo.desc').splitlines()
853 853 (oldlen, desc, detail) = (int(args[0]), args[1], None)
854 854 if len(args) >= 3:
855 855 detail = args[2]
856 856 oldtip = oldlen - 1
857 857
858 858 if detail and ui.verbose:
859 859 msg = (_('repository tip rolled back to revision %s'
860 860 ' (undo %s: %s)\n')
861 861 % (oldtip, desc, detail))
862 862 else:
863 863 msg = (_('repository tip rolled back to revision %s'
864 864 ' (undo %s)\n')
865 865 % (oldtip, desc))
866 866 except IOError:
867 867 msg = _('rolling back unknown transaction\n')
868 868 desc = None
869 869
870 870 if not force and self['.'] != self['tip'] and desc == 'commit':
871 871 raise util.Abort(
872 872 _('rollback of last commit while not checked out '
873 873 'may lose data'), hint=_('use -f to force'))
874 874
875 875 ui.status(msg)
876 876 if dryrun:
877 877 return 0
878 878
879 879 parents = self.dirstate.parents()
880 880 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
881 881 if os.path.exists(self.join('undo.bookmarks')):
882 882 util.rename(self.join('undo.bookmarks'),
883 883 self.join('bookmarks'))
884 884 if os.path.exists(self.sjoin('undo.phaseroots')):
885 885 util.rename(self.sjoin('undo.phaseroots'),
886 886 self.sjoin('phaseroots'))
887 887 self.invalidate()
888 888
889 889 parentgone = (parents[0] not in self.changelog.nodemap or
890 890 parents[1] not in self.changelog.nodemap)
891 891 if parentgone:
892 892 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
893 893 try:
894 894 branch = self.opener.read('undo.branch')
895 895 self.dirstate.setbranch(branch)
896 896 except IOError:
897 897 ui.warn(_('named branch could not be reset: '
898 898 'current branch is still \'%s\'\n')
899 899 % self.dirstate.branch())
900 900
901 901 self.dirstate.invalidate()
902 902 parents = tuple([p.rev() for p in self.parents()])
903 903 if len(parents) > 1:
904 904 ui.status(_('working directory now based on '
905 905 'revisions %d and %d\n') % parents)
906 906 else:
907 907 ui.status(_('working directory now based on '
908 908 'revision %d\n') % parents)
909 909 # TODO: if we know which new heads may result from this rollback, pass
910 910 # them to destroy(), which will prevent the branchhead cache from being
911 911 # invalidated.
912 912 self.destroyed()
913 913 return 0
914 914
915 915 def invalidatecaches(self):
916 916 def delcache(name):
917 917 try:
918 918 delattr(self, name)
919 919 except AttributeError:
920 920 pass
921 921
922 922 delcache('_tagscache')
923 923
924 924 self._branchcache = None # in UTF-8
925 925 self._branchcachetip = None
926 926
927 927 def invalidatedirstate(self):
928 928 '''Invalidates the dirstate, causing the next call to dirstate
929 929 to check if it was modified since the last time it was read,
930 930 rereading it if it has.
931 931
932 932 This differs from dirstate.invalidate() in that it doesn't always
933 933 reread the dirstate. Use dirstate.invalidate() if you want to
934 934 explicitly read the dirstate again (i.e. restoring it to a previous
935 935 known good state).'''
936 936 if 'dirstate' in self.__dict__:
937 937 for k in self.dirstate._filecache:
938 938 try:
939 939 delattr(self.dirstate, k)
940 940 except AttributeError:
941 941 pass
942 942 delattr(self, 'dirstate')
943 943
944 944 def invalidate(self):
945 945 for k in self._filecache:
946 946 # dirstate is invalidated separately in invalidatedirstate()
947 947 if k == 'dirstate':
948 948 continue
949 949
950 950 try:
951 951 delattr(self, k)
952 952 except AttributeError:
953 953 pass
954 954 self.invalidatecaches()
955 955
956 956 # Discard all cache entries to force reloading everything.
957 957 self._filecache.clear()
958 958
959 959 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
960 960 try:
961 961 l = lock.lock(lockname, 0, releasefn, desc=desc)
962 962 except error.LockHeld, inst:
963 963 if not wait:
964 964 raise
965 965 self.ui.warn(_("waiting for lock on %s held by %r\n") %
966 966 (desc, inst.locker))
967 967 # default to 600 seconds timeout
968 968 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
969 969 releasefn, desc=desc)
970 970 if acquirefn:
971 971 acquirefn()
972 972 return l
973 973
974 974 def _afterlock(self, callback):
975 975 """add a callback to the current repository lock.
976 976
977 977 The callback will be executed on lock release."""
978 978 l = self._lockref and self._lockref()
979 979 if l:
980 980 l.postrelease.append(callback)
981 981 else:
982 982 callback()
983 983
984 984 def lock(self, wait=True):
985 985 '''Lock the repository store (.hg/store) and return a weak reference
986 986 to the lock. Use this before modifying the store (e.g. committing or
987 987 stripping). If you are opening a transaction, get a lock as well.'''
988 988 l = self._lockref and self._lockref()
989 989 if l is not None and l.held:
990 990 l.lock()
991 991 return l
992 992
993 993 def unlock():
994 994 self.store.write()
995 995 if '_phasecache' in vars(self):
996 996 self._phasecache.write()
997 997 for k, ce in self._filecache.items():
998 998 if k == 'dirstate':
999 999 continue
1000 1000 ce.refresh()
1001 1001
1002 1002 l = self._lock(self.sjoin("lock"), wait, unlock,
1003 1003 self.invalidate, _('repository %s') % self.origroot)
1004 1004 self._lockref = weakref.ref(l)
1005 1005 return l
1006 1006
1007 1007 def wlock(self, wait=True):
1008 1008 '''Lock the non-store parts of the repository (everything under
1009 1009 .hg except .hg/store) and return a weak reference to the lock.
1010 1010 Use this before modifying files in .hg.'''
1011 1011 l = self._wlockref and self._wlockref()
1012 1012 if l is not None and l.held:
1013 1013 l.lock()
1014 1014 return l
1015 1015
1016 1016 def unlock():
1017 1017 self.dirstate.write()
1018 1018 ce = self._filecache.get('dirstate')
1019 1019 if ce:
1020 1020 ce.refresh()
1021 1021
1022 1022 l = self._lock(self.join("wlock"), wait, unlock,
1023 1023 self.invalidatedirstate, _('working directory of %s') %
1024 1024 self.origroot)
1025 1025 self._wlockref = weakref.ref(l)
1026 1026 return l
1027 1027
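When both locks are needed, wlock() is taken before lock() and released in the opposite order, as rollback() does above. The pattern, for a hypothetical caller ('release' as imported from lock at the top of this file):

    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        # ... mutate store and working-directory state under both locks ...
    finally:
        release(lock, wlock)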
1028 1028 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1029 1029 """
1030 1030 commit an individual file as part of a larger transaction
1031 1031 """
1032 1032
1033 1033 fname = fctx.path()
1034 1034 text = fctx.data()
1035 1035 flog = self.file(fname)
1036 1036 fparent1 = manifest1.get(fname, nullid)
1037 1037 fparent2 = fparent2o = manifest2.get(fname, nullid)
1038 1038
1039 1039 meta = {}
1040 1040 copy = fctx.renamed()
1041 1041 if copy and copy[0] != fname:
1042 1042 # Mark the new revision of this file as a copy of another
1043 1043 # file. This copy data will effectively act as a parent
1044 1044 # of this new revision. If this is a merge, the first
1045 1045 # parent will be the nullid (meaning "look up the copy data")
1046 1046 # and the second one will be the other parent. For example:
1047 1047 #
1048 1048 # 0 --- 1 --- 3 rev1 changes file foo
1049 1049 # \ / rev2 renames foo to bar and changes it
1050 1050 # \- 2 -/ rev3 should have bar with all changes and
1051 1051 # should record that bar descends from
1052 1052 # bar in rev2 and foo in rev1
1053 1053 #
1054 1054 # this allows this merge to succeed:
1055 1055 #
1056 1056 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1057 1057 # \ / merging rev3 and rev4 should use bar@rev2
1058 1058 # \- 2 --- 4 as the merge base
1059 1059 #
1060 1060
1061 1061 cfname = copy[0]
1062 1062 crev = manifest1.get(cfname)
1063 1063 newfparent = fparent2
1064 1064
1065 1065 if manifest2: # branch merge
1066 1066 if fparent2 == nullid or crev is None: # copied on remote side
1067 1067 if cfname in manifest2:
1068 1068 crev = manifest2[cfname]
1069 1069 newfparent = fparent1
1070 1070
1071 1071 # find source in nearest ancestor if we've lost track
1072 1072 if not crev:
1073 1073 self.ui.debug(" %s: searching for copy revision for %s\n" %
1074 1074 (fname, cfname))
1075 1075 for ancestor in self[None].ancestors():
1076 1076 if cfname in ancestor:
1077 1077 crev = ancestor[cfname].filenode()
1078 1078 break
1079 1079
1080 1080 if crev:
1081 1081 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1082 1082 meta["copy"] = cfname
1083 1083 meta["copyrev"] = hex(crev)
1084 1084 fparent1, fparent2 = nullid, newfparent
1085 1085 else:
1086 1086 self.ui.warn(_("warning: can't find ancestor for '%s' "
1087 1087 "copied from '%s'!\n") % (fname, cfname))
1088 1088
1089 1089 elif fparent2 != nullid:
1090 1090 # is one parent an ancestor of the other?
1091 1091 fparentancestor = flog.ancestor(fparent1, fparent2)
1092 1092 if fparentancestor == fparent1:
1093 1093 fparent1, fparent2 = fparent2, nullid
1094 1094 elif fparentancestor == fparent2:
1095 1095 fparent2 = nullid
1096 1096
1097 1097 # is the file changed?
1098 1098 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1099 1099 changelist.append(fname)
1100 1100 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1101 1101
1102 1102 # are just the flags changed during merge?
1103 1103 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1104 1104 changelist.append(fname)
1105 1105
1106 1106 return fparent1
1107 1107
1108 1108 def commit(self, text="", user=None, date=None, match=None, force=False,
1109 1109 editor=False, extra={}):
1110 1110 """Add a new revision to current repository.
1111 1111
1112 1112 Revision information is gathered from the working directory,
1113 1113 match can be used to filter the committed files. If editor is
1114 1114 supplied, it is called to get a commit message.
1115 1115 """
1116 1116
1117 1117 def fail(f, msg):
1118 1118 raise util.Abort('%s: %s' % (f, msg))
1119 1119
1120 1120 if not match:
1121 1121 match = matchmod.always(self.root, '')
1122 1122
1123 1123 if not force:
1124 1124 vdirs = []
1125 1125 match.dir = vdirs.append
1126 1126 match.bad = fail
1127 1127
1128 1128 wlock = self.wlock()
1129 1129 try:
1130 1130 wctx = self[None]
1131 1131 merge = len(wctx.parents()) > 1
1132 1132
1133 1133 if (not force and merge and match and
1134 1134 (match.files() or match.anypats())):
1135 1135 raise util.Abort(_('cannot partially commit a merge '
1136 1136 '(do not specify files or patterns)'))
1137 1137
1138 1138 changes = self.status(match=match, clean=force)
1139 1139 if force:
1140 1140 changes[0].extend(changes[6]) # mq may commit unchanged files
1141 1141
1142 1142 # check subrepos
1143 1143 subs = []
1144 1144 commitsubs = set()
1145 1145 newstate = wctx.substate.copy()
1146 1146 # only manage subrepos and .hgsubstate if .hgsub is present
1147 1147 if '.hgsub' in wctx:
1148 1148 # we'll decide whether to track this ourselves, thanks
1149 1149 if '.hgsubstate' in changes[0]:
1150 1150 changes[0].remove('.hgsubstate')
1151 1151 if '.hgsubstate' in changes[2]:
1152 1152 changes[2].remove('.hgsubstate')
1153 1153
1154 1154 # compare current state to last committed state
1155 1155 # build new substate based on last committed state
1156 1156 oldstate = wctx.p1().substate
1157 1157 for s in sorted(newstate.keys()):
1158 1158 if not match(s):
1159 1159 # ignore working copy, use old state if present
1160 1160 if s in oldstate:
1161 1161 newstate[s] = oldstate[s]
1162 1162 continue
1163 1163 if not force:
1164 1164 raise util.Abort(
1165 1165 _("commit with new subrepo %s excluded") % s)
1166 1166 if wctx.sub(s).dirty(True):
1167 1167 if not self.ui.configbool('ui', 'commitsubrepos'):
1168 1168 raise util.Abort(
1169 1169 _("uncommitted changes in subrepo %s") % s,
1170 1170 hint=_("use --subrepos for recursive commit"))
1171 1171 subs.append(s)
1172 1172 commitsubs.add(s)
1173 1173 else:
1174 1174 bs = wctx.sub(s).basestate()
1175 1175 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1176 1176 if oldstate.get(s, (None, None, None))[1] != bs:
1177 1177 subs.append(s)
1178 1178
1179 1179 # check for removed subrepos
1180 1180 for p in wctx.parents():
1181 1181 r = [s for s in p.substate if s not in newstate]
1182 1182 subs += [s for s in r if match(s)]
1183 1183 if subs:
1184 1184 if (not match('.hgsub') and
1185 1185 '.hgsub' in (wctx.modified() + wctx.added())):
1186 1186 raise util.Abort(
1187 1187 _("can't commit subrepos without .hgsub"))
1188 1188 changes[0].insert(0, '.hgsubstate')
1189 1189
1190 1190 elif '.hgsub' in changes[2]:
1191 1191 # clean up .hgsubstate when .hgsub is removed
1192 1192 if ('.hgsubstate' in wctx and
1193 1193 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1194 1194 changes[2].insert(0, '.hgsubstate')
1195 1195
1196 1196 # make sure all explicit patterns are matched
1197 1197 if not force and match.files():
1198 1198 matched = set(changes[0] + changes[1] + changes[2])
1199 1199
1200 1200 for f in match.files():
1201 1201 if f == '.' or f in matched or f in wctx.substate:
1202 1202 continue
1203 1203 if f in changes[3]: # missing
1204 1204 fail(f, _('file not found!'))
1205 1205 if f in vdirs: # visited directory
1206 1206 d = f + '/'
1207 1207 for mf in matched:
1208 1208 if mf.startswith(d):
1209 1209 break
1210 1210 else:
1211 1211 fail(f, _("no match under directory!"))
1212 1212 elif f not in self.dirstate:
1213 1213 fail(f, _("file not tracked!"))
1214 1214
1215 1215 if (not force and not extra.get("close") and not merge
1216 1216 and not (changes[0] or changes[1] or changes[2])
1217 1217 and wctx.branch() == wctx.p1().branch()):
1218 1218 return None
1219 1219
1220 1220 if merge and changes[3]:
1221 1221 raise util.Abort(_("cannot commit merge with missing files"))
1222 1222
1223 1223 ms = mergemod.mergestate(self)
1224 1224 for f in changes[0]:
1225 1225 if f in ms and ms[f] == 'u':
1226 1226 raise util.Abort(_("unresolved merge conflicts "
1227 1227 "(see hg help resolve)"))
1228 1228
1229 1229 cctx = context.workingctx(self, text, user, date, extra, changes)
1230 1230 if editor:
1231 1231 cctx._text = editor(self, cctx, subs)
1232 1232 edited = (text != cctx._text)
1233 1233
1234 1234 # commit subs and write new state
1235 1235 if subs:
1236 1236 for s in sorted(commitsubs):
1237 1237 sub = wctx.sub(s)
1238 1238 self.ui.status(_('committing subrepository %s\n') %
1239 1239 subrepo.subrelpath(sub))
1240 1240 sr = sub.commit(cctx._text, user, date)
1241 1241 newstate[s] = (newstate[s][0], sr)
1242 1242 subrepo.writestate(self, newstate)
1243 1243
1244 1244 # Save commit message in case this transaction gets rolled back
1245 1245 # (e.g. by a pretxncommit hook). Leave the content alone on
1246 1246 # the assumption that the user will use the same editor again.
1247 1247 msgfn = self.savecommitmessage(cctx._text)
1248 1248
1249 1249 p1, p2 = self.dirstate.parents()
1250 1250 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1251 1251 try:
1252 1252 self.hook("precommit", throw=True, parent1=hookp1,
1253 1253 parent2=hookp2)
1254 1254 ret = self.commitctx(cctx, True)
1255 1255 except: # re-raises
1256 1256 if edited:
1257 1257 self.ui.write(
1258 1258 _('note: commit message saved in %s\n') % msgfn)
1259 1259 raise
1260 1260
1261 1261 # update bookmarks, dirstate and mergestate
1262 1262 bookmarks.update(self, [p1, p2], ret)
1263 1263 for f in changes[0] + changes[1]:
1264 1264 self.dirstate.normal(f)
1265 1265 for f in changes[2]:
1266 1266 self.dirstate.drop(f)
1267 1267 self.dirstate.setparents(ret)
1268 1268 ms.reset()
1269 1269 finally:
1270 1270 wlock.release()
1271 1271
1272 1272 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1273 1273 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1274 1274 self._afterlock(commithook)
1275 1275 return ret
1276 1276
1277 1277 def commitctx(self, ctx, error=False):
1278 1278 """Add a new revision to current repository.
1279 1279 Revision information is passed via the context argument.
1280 1280 """
1281 1281
1282 1282 tr = lock = None
1283 1283 removed = list(ctx.removed())
1284 1284 p1, p2 = ctx.p1(), ctx.p2()
1285 1285 user = ctx.user()
1286 1286
1287 1287 lock = self.lock()
1288 1288 try:
1289 1289 tr = self.transaction("commit")
1290 1290 trp = weakref.proxy(tr)
1291 1291
1292 1292 if ctx.files():
1293 1293 m1 = p1.manifest().copy()
1294 1294 m2 = p2.manifest()
1295 1295
1296 1296 # check in files
1297 1297 new = {}
1298 1298 changed = []
1299 1299 linkrev = len(self)
1300 1300 for f in sorted(ctx.modified() + ctx.added()):
1301 1301 self.ui.note(f + "\n")
1302 1302 try:
1303 1303 fctx = ctx[f]
1304 1304 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1305 1305 changed)
1306 1306 m1.set(f, fctx.flags())
1307 1307 except OSError, inst:
1308 1308 self.ui.warn(_("trouble committing %s!\n") % f)
1309 1309 raise
1310 1310 except IOError, inst:
1311 1311 errcode = getattr(inst, 'errno', errno.ENOENT)
1312 1312 if error or errcode and errcode != errno.ENOENT:
1313 1313 self.ui.warn(_("trouble committing %s!\n") % f)
1314 1314 raise
1315 1315 else:
1316 1316 removed.append(f)
1317 1317
1318 1318 # update manifest
1319 1319 m1.update(new)
1320 1320 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1321 1321 drop = [f for f in removed if f in m1]
1322 1322 for f in drop:
1323 1323 del m1[f]
1324 1324 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1325 1325 p2.manifestnode(), (new, drop))
1326 1326 files = changed + removed
1327 1327 else:
1328 1328 mn = p1.manifestnode()
1329 1329 files = []
1330 1330
1331 1331 # update changelog
1332 1332 self.changelog.delayupdate()
1333 1333 n = self.changelog.add(mn, files, ctx.description(),
1334 1334 trp, p1.node(), p2.node(),
1335 1335 user, ctx.date(), ctx.extra().copy())
1336 1336 p = lambda: self.changelog.writepending() and self.root or ""
1337 1337 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1338 1338 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1339 1339 parent2=xp2, pending=p)
1340 1340 self.changelog.finalize(trp)
1341 1341 # set the new commit in the proper phase
1342 1342 targetphase = phases.newcommitphase(self.ui)
1343 1343 if targetphase:
1344 1344 # retracting the boundary does not alter parent changesets.
1345 1345 # if a parent has a higher phase, the resulting phase will
1346 1346 # be compliant anyway
1347 1347 #
1348 1348 # if minimal phase was 0 we don't need to retract anything
1349 1349 phases.retractboundary(self, targetphase, [n])
1350 1350 tr.close()
1351 1351 self.updatebranchcache()
1352 1352 return n
1353 1353 finally:
1354 1354 if tr:
1355 1355 tr.release()
1356 1356 lock.release()
1357 1357
1358 1358 def destroyed(self, newheadnodes=None):
1359 1359 '''Inform the repository that nodes have been destroyed.
1360 1360 Intended for use by strip and rollback, so there's a common
1361 1361 place for anything that has to be done after destroying history.
1362 1362
1363 1363 If you know the branchhead cache was up to date before nodes were removed
1364 1364 and you also know the set of candidate new heads that may have resulted
1365 1365 from the destruction, you can set newheadnodes. This will enable the
1366 1366 code to update the branchheads cache, rather than having future code
1367 1367 decide it's invalid and regenerating it from scratch.
1368 1368 '''
1369 1369 # If we have info, newheadnodes, on how to update the branch cache, do
1370 1370 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1371 1371 # will be caught the next time it is read.
1372 1372 if newheadnodes:
1373 1373 tiprev = len(self) - 1
1374 1374 ctxgen = (self[node] for node in newheadnodes
1375 1375 if self.changelog.hasnode(node))
1376 1376 self._updatebranchcache(self._branchcache, ctxgen)
1377 1377 self._writebranchcache(self._branchcache, self.changelog.tip(),
1378 1378 tiprev)
1379 1379
1380 1380 # Ensure the persistent tag cache is updated. Doing it now
1381 1381 # means that the tag cache only has to worry about destroyed
1382 1382 # heads immediately after a strip/rollback. That in turn
1383 1383 # guarantees that "cachetip == currenttip" (comparing both rev
1384 1384 # and node) always means no nodes have been added or destroyed.
1385 1385
1386 1386 # XXX this is suboptimal when qrefresh'ing: we strip the current
1387 1387 # head, refresh the tag cache, then immediately add a new head.
1388 1388 # But I think doing it this way is necessary for the "instant
1389 1389 # tag cache retrieval" case to work.
1390 1390 self.invalidatecaches()
1391 1391
1392 1392 def walk(self, match, node=None):
1393 1393 '''
1394 1394 walk recursively through the directory tree or a given
1395 1395 changeset, finding all files matched by the match
1396 1396 function
1397 1397 '''
1398 1398 return self[node].walk(match)
1399 1399
1400 1400 def status(self, node1='.', node2=None, match=None,
1401 1401 ignored=False, clean=False, unknown=False,
1402 1402 listsubrepos=False):
1403 1403 """return status of files between two nodes or node and working
1404 1404 directory.
1405 1405
1406 1406 If node1 is None, use the first dirstate parent instead.
1407 1407 If node2 is None, compare node1 with working directory.
1408 1408 """
1409 1409
1410 1410 def mfmatches(ctx):
1411 1411 mf = ctx.manifest().copy()
1412 1412 if match.always():
1413 1413 return mf
1414 1414 for fn in mf.keys():
1415 1415 if not match(fn):
1416 1416 del mf[fn]
1417 1417 return mf
1418 1418
1419 1419 if isinstance(node1, context.changectx):
1420 1420 ctx1 = node1
1421 1421 else:
1422 1422 ctx1 = self[node1]
1423 1423 if isinstance(node2, context.changectx):
1424 1424 ctx2 = node2
1425 1425 else:
1426 1426 ctx2 = self[node2]
1427 1427
1428 1428 working = ctx2.rev() is None
1429 1429 parentworking = working and ctx1 == self['.']
1430 1430 match = match or matchmod.always(self.root, self.getcwd())
1431 1431 listignored, listclean, listunknown = ignored, clean, unknown
1432 1432
1433 1433 # load earliest manifest first for caching reasons
1434 1434 if not working and ctx2.rev() < ctx1.rev():
1435 1435 ctx2.manifest()
1436 1436
1437 1437 if not parentworking:
1438 1438 def bad(f, msg):
1439 1439 # 'f' may be a directory pattern from 'match.files()',
1440 1440 # so 'f not in ctx1' is not enough
1441 1441 if f not in ctx1 and f not in ctx1.dirs():
1442 1442 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1443 1443 match.bad = bad
1444 1444
1445 1445 if working: # we need to scan the working dir
1446 1446 subrepos = []
1447 1447 if '.hgsub' in self.dirstate:
1448 1448 subrepos = ctx2.substate.keys()
1449 1449 s = self.dirstate.status(match, subrepos, listignored,
1450 1450 listclean, listunknown)
1451 1451 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1452 1452
1453 1453 # check for any possibly clean files
1454 1454 if parentworking and cmp:
1455 1455 fixup = []
1456 1456 # do a full compare of any files that might have changed
1457 1457 for f in sorted(cmp):
1458 1458 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1459 1459 or ctx1[f].cmp(ctx2[f])):
1460 1460 modified.append(f)
1461 1461 else:
1462 1462 fixup.append(f)
1463 1463
1464 1464 # update dirstate for files that are actually clean
1465 1465 if fixup:
1466 1466 if listclean:
1467 1467 clean += fixup
1468 1468
1469 1469 try:
1470 1470 # updating the dirstate is optional
1471 1471 # so we don't wait on the lock
1472 1472 wlock = self.wlock(False)
1473 1473 try:
1474 1474 for f in fixup:
1475 1475 self.dirstate.normal(f)
1476 1476 finally:
1477 1477 wlock.release()
1478 1478 except error.LockError:
1479 1479 pass
1480 1480
1481 1481 if not parentworking:
1482 1482 mf1 = mfmatches(ctx1)
1483 1483 if working:
1484 1484 # we are comparing working dir against non-parent
1485 1485 # generate a pseudo-manifest for the working dir
1486 1486 mf2 = mfmatches(self['.'])
1487 1487 for f in cmp + modified + added:
1488 1488 mf2[f] = None
1489 1489 mf2.set(f, ctx2.flags(f))
1490 1490 for f in removed:
1491 1491 if f in mf2:
1492 1492 del mf2[f]
1493 1493 else:
1494 1494 # we are comparing two revisions
1495 1495 deleted, unknown, ignored = [], [], []
1496 1496 mf2 = mfmatches(ctx2)
1497 1497
1498 1498 modified, added, clean = [], [], []
1499 1499 withflags = mf1.withflags() | mf2.withflags()
1500 1500 for fn in mf2:
1501 1501 if fn in mf1:
1502 1502 if (fn not in deleted and
1503 1503 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1504 1504 (mf1[fn] != mf2[fn] and
1505 1505 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1506 1506 modified.append(fn)
1507 1507 elif listclean:
1508 1508 clean.append(fn)
1509 1509 del mf1[fn]
1510 1510 elif fn not in deleted:
1511 1511 added.append(fn)
1512 1512 removed = mf1.keys()
1513 1513
1514 1514 if working and modified and not self.dirstate._checklink:
1515 1515 # Symlink placeholders may get non-symlink-like contents
1516 1516 # via user error or dereferencing by NFS or Samba servers,
1517 1517 # so we filter out any placeholders that don't look like a
1518 1518 # symlink
1519 1519 sane = []
1520 1520 for f in modified:
1521 1521 if ctx2.flags(f) == 'l':
1522 1522 d = ctx2[f].data()
1523 1523 if len(d) >= 1024 or '\n' in d or util.binary(d):
1524 1524 self.ui.debug('ignoring suspect symlink placeholder'
1525 1525 ' "%s"\n' % f)
1526 1526 continue
1527 1527 sane.append(f)
1528 1528 modified = sane
1529 1529
1530 1530 r = modified, added, removed, deleted, unknown, ignored, clean
1531 1531
1532 1532 if listsubrepos:
1533 1533 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1534 1534 if working:
1535 1535 rev2 = None
1536 1536 else:
1537 1537 rev2 = ctx2.substate[subpath][1]
1538 1538 try:
1539 1539 submatch = matchmod.narrowmatcher(subpath, match)
1540 1540 s = sub.status(rev2, match=submatch, ignored=listignored,
1541 1541 clean=listclean, unknown=listunknown,
1542 1542 listsubrepos=True)
1543 1543 for rfiles, sfiles in zip(r, s):
1544 1544 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1545 1545 except error.LookupError:
1546 1546 self.ui.status(_("skipping missing subrepository: %s\n")
1547 1547 % subpath)
1548 1548
1549 1549 for l in r:
1550 1550 l.sort()
1551 1551 return r
1552 1552
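Callers unpack the 7-tuple positionally; each element is a sorted list of file names. Hypothetical usage:

    modified, added, removed, deleted, unknown, ignored, clean = \
        repo.status(ignored=True, unknown=True, clean=True)
    # the ignored/unknown/clean lists stay empty unless requested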
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if not self[h].closesbranch()]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

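    # between() samples each top->bottom chain at exponentially growing
    # first-parent distances (1, 2, 4, 8, ... steps below top); the wire
    # protocol uses this to binary-search for common ancestors. A small
    # sketch with hypothetical nodes:
    #
    #   linear history:  bottom <- a <- b <- c <- d <- top
    #   repo.between([(top, bottom)]) == [[d, c, a]]
    #   (the nodes 1, 2 and 4 steps below top; step 8 is past bottom)
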
    def pull(self, remote, heads=None, force=False):
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled everything possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and non-publishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing; all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            remoteobs = remote.listkeys('obsolete')
            if 'dump' in remoteobs:
                if tr is None:
                    tr = self.transaction(trname)
                data = base85.b85decode(remoteobs['dump'])
                self.obsstore.mergemarkers(tr, data)
            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result

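    # Phase exchange on pull, in short: a publishing remote (or one too
    # old to speak the 'phases' pushkey namespace) makes everything we now
    # have in common public; a non-publishing remote only advances the
    # public boundary to the heads it reports as public, and the rest of
    # the pulled subset stays draft. A rough sketch of the publishing case
    # (hypothetical value):
    #
    #   remotephases = {'publishing': 'True'}
    #   -> phases.advanceboundary(self, phases.public, subset)
    #   # every changeset in subset, and its ancestors, turns public
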
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

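    # A minimal sketch of how an extension might hook checkpush()
    # (hypothetical extension code, not part of this module):
    #
    #   def reposetup(ui, repo):
    #       class vetopushrepo(repo.__class__):
    #           def checkpush(self, force, revs):
    #               super(vetopushrepo, self).checkpush(force, revs)
    #               if not force and self.ui.configbool('veto', 'push'):
    #                   raise util.Abort(_('pushing vetoed by config'))
    #       repo.__class__ = vetopushrepo
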
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)

                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count
                        # change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize the target of the push
                    cheads = outgoing.missingheads
                elif revs is None:
                    # the whole push failed; synchronize all common
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changesets filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs that are draft on the remote
                    # but public here.
                    # XXX Beware that this revset breaks if droots is not
                    # XXX strictly roots; we may want to ensure it is, but
                    # XXX that is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
                if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
                    data = self.listkeys('obsolete')['dump']
                    r = remote.pushkey('obsolete', 'dump', '', data)
                    if not r:
                        self.ui.warn(_('failed to push obsolete markers!\n'))
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

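    # Interpreting push()'s return value, per the docstring above
    # (hypothetical caller code):
    #
    #   ret = repo.push(remote)
    #   if ret is None:
    #       pass               # nothing to push
    #   elif ret == 0:
    #       pass               # HTTP error reported by unbundle
    #   else:
    #       pass               # pushed; non-zero values encode the remote
    #                          # head-count change as in addchangegroup()
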
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors([cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getlocalbundle(self, source, outgoing):
        """Like getbundle, but taking a discovery.outgoing as an argument.

        This is only implemented for local repos and reuses potentially
        precomputed sets in outgoing."""
        if not outgoing.missing:
            return None
        return self._changegroupsubset(outgoing.common,
                                       outgoing.missing,
                                       outgoing.missingheads,
                                       source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        return self.getlocalbundle(source,
                                   discovery.outgoing(cl, common, heads))

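    # How these bundle entry points relate, roughly: getbundle() turns
    # (common, heads) into a discovery.outgoing and defers to
    # getlocalbundle(), which calls _changegroupsubset(); the older
    # changegroupsubset() derives the same inputs from (bases, heads) via
    # nodesbetween(). E.g. (hypothetical call):
    #
    #   cg = repo.getbundle('pull', heads=[tip], common=[nullid])
    #   # bundles every ancestor of tip, like a full pull
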
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0, 0]

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            rr, rl = revlog.rev, revlog.linkrev
            return [n for n in missing
                    if rl(rr(n)) not in commonrevs]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

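        # The bundler calls lookup(revlog, node) for each entry it emits
        # and records the returned changelog node as that entry's
        # linknode. The three branches below handle the changelog itself
        # (also harvesting changed files and manifest nodes as a side
        # effect), the manifest, and the filelogs.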
        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f, n in mdata.iteritems():
                    if f in changedfiles:
                        fnodes[f].setdefault(n, clnode)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return clnode
            else:
                progress(_bundling, count[0], item=fstate[0],
                         unit=_files, total=count[1])
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            count[:] = [0, len(csets)]
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            for f in changedfiles:
                fnodes[f] = {}
            count[:] = [0, len(mfs)]
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            progress(_bundling, None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0, 0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            ln, llr = log.node, log.linkrev
            return [ln(r) for r in log if llr(r) in revset]

        progress = self.ui.progress
        _bundling = _('bundling')
        _changesets = _('changesets')
        _manifests = _('manifests')
        _files = _('files')

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_changesets, total=count[1])
                return x
            elif revlog == mf:
                count[0] += 1
                progress(_bundling, count[0],
                         unit=_manifests, total=count[1])
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                progress(_bundling, count[0], item=fstate[0],
                         total=count[1], unit=_files)
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            count[:] = [0, len(nodes)]
            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(mfs)]
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            progress(_bundling, None)

            count[:] = [0, len(changedfiles)]
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s")
                                     % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            progress(_bundling, None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
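        # Example of the head-count encoding (illustrative numbers): going
        # from 1 head to 3 returns 3 (dh=2, dh+1); losing one head returns
        # -2 (dh=-1, dh-1); an unchanged head count returns 1 (dh=0).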
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            srccontent = cl.addgroup(source, csmap, trp)
            if not (srccontent or emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if not fl.addgroup(source, revmap, trp):
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and self[h].closesbranch():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            added = [cl.node(r) for r in xrange(clstart, clend)]
            publishing = self.ui.configbool('phases', 'publish', True)
            if srctype == 'push':
                # Old servers can not push the boundary themselves.
                # New servers won't push the boundary if the changeset
                # already existed locally as secret
                #
                # We should not use added here but the list of all changes
                # in the bundle
                if publishing:
                    phases.advanceboundary(self, phases.public, srccontent)
                else:
                    phases.advanceboundary(self, phases.draft, srccontent)
                    phases.retractboundary(self, phases.draft, added)
            elif srctype != 'strip':
                # publishing only alters behavior during push
                #
                # strip should not touch boundary at all
                phases.retractboundary(self, phases.draft, added)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()

            if changesets > 0:
                def runhooks():
                    # forcefully update the on-disk branch cache
                    self.ui.debug("updating the branch cache\n")
                    self.updatebranchcache()
                    self.hook("changegroup", node=hex(cl.node(clstart)),
                              source=srctype, url=url)

                    for n in added:
                        self.hook("incoming", node=hex(n), source=srctype,
                                  url=url)
                self._afterlock(runhooks)

        finally:
            tr.release()
        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

    def stream_in(self, remote, requirements):
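        # The stream_out wire format, as parsed below: one status line
        # (an integer, 0 meaning ok), one "<total files> <total bytes>"
        # line, then for each file a "<name>\0<size>" header followed by
        # exactly <size> bytes of raw store data.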
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('unexpected response from remote server:'), l)
                if self.ui.debugflag:
                    self.ui.debug('adding %s (%s)\n' %
                                  (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    handled_bytes += len(chunk)
                    self.ui.progress(_('clone'), handled_bytes,
                                     total=total_bytes)
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements from the
            #                    streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if not stream:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

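    # Stream-clone negotiation above, in short: an explicit stream
    # request (or a 'stream-preferred' server) first tries the plain
    # 'stream' capability (revlogv1 only), then 'streamreqs' (a
    # comma-separated list of format requirements, accepted only if we
    # support them all), and otherwise falls back to a regular pull.
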
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            try:
                util.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

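# aftertrans returns a plain closure rather than a bound method so the
# transaction never keeps a reference back to the repository. A rough
# usage sketch (hypothetical paths):
#
#   onclose = aftertrans([('journal', 'undo'),
#                         ('journal.bookmarks', 'undo.bookmarks')])
#   onclose()   # renames each journal file to its undo counterpart
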
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

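# undoname() maps a journal file to its undo counterpart, e.g. (sketch):
#
#   undoname('.hg/journal.bookmarks') == '.hg/undo.bookmarks'
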
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True