phases: add a cache to know in which phase a changeset is
Pierre-Yves David
r15420:e80d0d31 default
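This changeset introduces a per-repository cache, _phaserev, that maps each
revision number to its phase, so a phase lookup no longer has to walk the
phase roots. Below is a minimal standalone sketch of the computation added in
this diff (the hunk at new-file lines 177-188), assuming phases.trackedphases
lists the non-public phase numbers and _phaseroots maps each phase to its root
nodes, both names taken from the diff; the helper name is illustrative:

    def buildphasecache(repo):
        # every revision defaults to phase 0 (public)
        cache = [0] * len(repo)
        cl = repo.changelog
        for phase in phases.trackedphases:
            # phase roots are stored as nodes; convert to revision numbers
            roots = map(cl.rev, repo._phaseroots[phase])
            if roots:
                # a root and all of its descendants carry that phase
                for rev in roots:
                    cache[rev] = phase
                for rev in cl.descendants(*roots):
                    cache[rev] = phase
        return cache

With the cache built, looking up a changeset's phase is a constant-time list
index, e.g. buildphasecache(repo)[repo.changelog.rev(node)].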
@@ -1,2108 +1,2120 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener.append(
61 61 "00changelog.i",
62 62 '\0\0\0\2' # represents revlogv2
63 63 ' dummy changelog to prevent using the old repo layout'
64 64 )
65 65 if self.ui.configbool('format', 'generaldelta', False):
66 66 requirements.append("generaldelta")
67 67 requirements = set(requirements)
68 68 else:
69 69 raise error.RepoError(_("repository %s not found") % path)
70 70 elif create:
71 71 raise error.RepoError(_("repository %s already exists") % path)
72 72 else:
73 73 try:
74 74 requirements = scmutil.readrequires(self.opener, self.supported)
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 requirements = set()
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100
101 101 self._branchcache = None
102 102 self._branchcachetip = None
103 103 self.filterpats = {}
104 104 self._datafilters = {}
105 105 self._transref = self._lockref = self._wlockref = None
106 106
107 107 # A cache for various files under .hg/ that tracks file changes
108 108 # (used by the filecache decorator)
109 109 #
110 110 # Maps a property name to its util.filecacheentry
111 111 self._filecache = {}
112 112
113 113 def _applyrequirements(self, requirements):
114 114 self.requirements = requirements
115 115 openerreqs = set(('revlogv1', 'generaldelta'))
116 116 self.sopener.options = dict((r, 1) for r in requirements
117 117 if r in openerreqs)
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
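As an illustration of the requirements written above, a repository created
with the default configuration ends up with a .hg/requires file listing one
entry per line (generaldelta appears only if format.generaldelta is enabled):

    revlogv1
    store
    fncache
    dotencode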
124 124
125 125 def _checknested(self, path):
126 126 """Determine if path is a legal nested repository."""
127 127 if not path.startswith(self.root):
128 128 return False
129 129 subpath = path[len(self.root) + 1:]
130 130
131 131 # XXX: Checking against the current working copy is wrong in
132 132 # the sense that it can reject things like
133 133 #
134 134 # $ hg cat -r 10 sub/x.txt
135 135 #
136 136 # if sub/ is no longer a subrepository in the working copy
137 137 # parent revision.
138 138 #
139 139 # However, it can of course also allow things that would have
140 140 # been rejected before, such as the above cat command if sub/
141 141 # is a subrepository now, but was a normal directory before.
142 142 # The old path auditor would have rejected by mistake since it
143 143 # panics when it sees sub/.hg/.
144 144 #
145 145 # All in all, checking against the working copy seems sensible
146 146 # since we want to prevent access to nested repositories on
147 147 # the filesystem *now*.
148 148 ctx = self[None]
149 149 parts = util.splitpath(subpath)
150 150 while parts:
151 151 prefix = os.sep.join(parts)
152 152 if prefix in ctx.substate:
153 153 if prefix == subpath:
154 154 return True
155 155 else:
156 156 sub = ctx.sub(prefix)
157 157 return sub.checknested(subpath[len(prefix) + 1:])
158 158 else:
159 159 parts.pop()
160 160 return False
161 161
162 162 @filecache('bookmarks')
163 163 def _bookmarks(self):
164 164 return bookmarks.read(self)
165 165
166 166 @filecache('bookmarks.current')
167 167 def _bookmarkcurrent(self):
168 168 return bookmarks.readcurrent(self)
169 169
170 170 def _writebookmarks(self, marks):
171 171 bookmarks.write(self)
172 172
173 173 @filecache('phaseroots')
174 174 def _phaseroots(self):
175 175 return phases.readroots(self)
176 176
177 @propertycache
178 def _phaserev(self):
179 cache = [0] * len(self)
180 for phase in phases.trackedphases:
181 roots = map(self.changelog.rev, self._phaseroots[phase])
182 if roots:
183 for rev in roots:
184 cache[rev] = phase
185 for rev in self.changelog.descendants(*roots):
186 cache[rev] = phase
187 return cache
188
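Note that _phaserev is a propertycache, so the list above is computed once per
localrepository instance on first access and then served from memory;
self._phaserev[self.changelog.rev(node)] then gives the phase of node in
constant time. How the cache is invalidated when phase roots change is not
visible in this section.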
177 189 @filecache('00changelog.i', True)
178 190 def changelog(self):
179 191 c = changelog.changelog(self.sopener)
180 192 if 'HG_PENDING' in os.environ:
181 193 p = os.environ['HG_PENDING']
182 194 if p.startswith(self.root):
183 195 c.readpending('00changelog.i.a')
184 196 return c
185 197
186 198 @filecache('00manifest.i', True)
187 199 def manifest(self):
188 200 return manifest.manifest(self.sopener)
189 201
190 202 @filecache('dirstate')
191 203 def dirstate(self):
192 204 warned = [0]
193 205 def validate(node):
194 206 try:
195 207 self.changelog.rev(node)
196 208 return node
197 209 except error.LookupError:
198 210 if not warned[0]:
199 211 warned[0] = True
200 212 self.ui.warn(_("warning: ignoring unknown"
201 213 " working parent %s!\n") % short(node))
202 214 return nullid
203 215
204 216 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
205 217
206 218 def __getitem__(self, changeid):
207 219 if changeid is None:
208 220 return context.workingctx(self)
209 221 return context.changectx(self, changeid)
210 222
211 223 def __contains__(self, changeid):
212 224 try:
213 225 return bool(self.lookup(changeid))
214 226 except error.RepoLookupError:
215 227 return False
216 228
217 229 def __nonzero__(self):
218 230 return True
219 231
220 232 def __len__(self):
221 233 return len(self.changelog)
222 234
223 235 def __iter__(self):
224 236 for i in xrange(len(self)):
225 237 yield i
226 238
227 239 def revs(self, expr, *args):
228 240 '''Return a list of revisions matching the given revset'''
229 241 expr = revset.formatspec(expr, *args)
230 242 m = revset.match(None, expr)
231 243 return [r for r in m(self, range(len(self)))]
232 244
233 245 def set(self, expr, *args):
234 246 '''
235 247 Yield a context for each matching revision, after doing arg
236 248 replacement via revset.formatspec
237 249 '''
238 250 for r in self.revs(expr, *args):
239 251 yield self[r]
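As a usage sketch for revs()/set() (illustrative; the format codes come from
revset.formatspec, e.g. %d for an integer and %s for a string, both assumed
here):

    # hypothetical caller: iterate over non-merge changesets on a branch
    for ctx in repo.set('branch(%s) and not merge()', 'default'):
        print ctx.rev(), ctx.description()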
240 252
241 253 def url(self):
242 254 return 'file:' + self.root
243 255
244 256 def hook(self, name, throw=False, **args):
245 257 return hook.hook(self.ui, self, name, throw, **args)
246 258
247 259 tag_disallowed = ':\r\n'
248 260
249 261 def _tag(self, names, node, message, local, user, date, extra={}):
250 262 if isinstance(names, str):
251 263 allchars = names
252 264 names = (names,)
253 265 else:
254 266 allchars = ''.join(names)
255 267 for c in self.tag_disallowed:
256 268 if c in allchars:
257 269 raise util.Abort(_('%r cannot be used in a tag name') % c)
258 270
259 271 branches = self.branchmap()
260 272 for name in names:
261 273 self.hook('pretag', throw=True, node=hex(node), tag=name,
262 274 local=local)
263 275 if name in branches:
264 276 self.ui.warn(_("warning: tag %s conflicts with existing"
265 277 " branch name\n") % name)
266 278
267 279 def writetags(fp, names, munge, prevtags):
268 280 fp.seek(0, 2)
269 281 if prevtags and prevtags[-1] != '\n':
270 282 fp.write('\n')
271 283 for name in names:
272 284 m = munge and munge(name) or name
273 285 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
274 286 old = self.tags().get(name, nullid)
275 287 fp.write('%s %s\n' % (hex(old), m))
276 288 fp.write('%s %s\n' % (hex(node), m))
277 289 fp.close()
278 290
279 291 prevtags = ''
280 292 if local:
281 293 try:
282 294 fp = self.opener('localtags', 'r+')
283 295 except IOError:
284 296 fp = self.opener('localtags', 'a')
285 297 else:
286 298 prevtags = fp.read()
287 299
288 300 # local tags are stored in the current charset
289 301 writetags(fp, names, None, prevtags)
290 302 for name in names:
291 303 self.hook('tag', node=hex(node), tag=name, local=local)
292 304 return
293 305
294 306 try:
295 307 fp = self.wfile('.hgtags', 'rb+')
296 308 except IOError, e:
297 309 if e.errno != errno.ENOENT:
298 310 raise
299 311 fp = self.wfile('.hgtags', 'ab')
300 312 else:
301 313 prevtags = fp.read()
302 314
303 315 # committed tags are stored in UTF-8
304 316 writetags(fp, names, encoding.fromlocal, prevtags)
305 317
306 318 fp.close()
307 319
308 320 if '.hgtags' not in self.dirstate:
309 321 self[None].add(['.hgtags'])
310 322
311 323 m = matchmod.exact(self.root, '', ['.hgtags'])
312 324 tagnode = self.commit(message, user, date, extra=extra, match=m)
313 325
314 326 for name in names:
315 327 self.hook('tag', node=hex(node), tag=name, local=local)
316 328
317 329 return tagnode
318 330
319 331 def tag(self, names, node, message, local, user, date):
320 332 '''tag a revision with one or more symbolic names.
321 333
322 334 names is a list of strings or, when adding a single tag, names may be a
323 335 string.
324 336
325 337 if local is True, the tags are stored in a per-repository file.
326 338 otherwise, they are stored in the .hgtags file, and a new
327 339 changeset is committed with the change.
328 340
329 341 keyword arguments:
330 342
331 343 local: whether to store tags in non-version-controlled file
332 344 (default False)
333 345
334 346 message: commit message to use if committing
335 347
336 348 user: name of user to use if committing
337 349
338 350 date: date tuple to use if committing'''
339 351
340 352 if not local:
341 353 for x in self.status()[:5]:
342 354 if '.hgtags' in x:
343 355 raise util.Abort(_('working copy of .hgtags is changed '
344 356 '(please commit .hgtags manually)'))
345 357
346 358 self.tags() # instantiate the cache
347 359 self._tag(names, node, message, local, user, date)
348 360
349 361 @propertycache
350 362 def _tagscache(self):
351 363 '''Returns a tagscache object that contains various tags-related caches.'''
352 364
353 365 # This simplifies its cache management by having one decorated
354 366 # function (this one) and the rest simply fetch things from it.
355 367 class tagscache(object):
356 368 def __init__(self):
357 369 # These two define the set of tags for this repository. tags
358 370 # maps tag name to node; tagtypes maps tag name to 'global' or
359 371 # 'local'. (Global tags are defined by .hgtags across all
360 372 # heads, and local tags are defined in .hg/localtags.)
361 373 # They constitute the in-memory cache of tags.
362 374 self.tags = self.tagtypes = None
363 375
364 376 self.nodetagscache = self.tagslist = None
365 377
366 378 cache = tagscache()
367 379 cache.tags, cache.tagtypes = self._findtags()
368 380
369 381 return cache
370 382
371 383 def tags(self):
372 384 '''return a mapping of tag to node'''
373 385 return self._tagscache.tags
374 386
375 387 def _findtags(self):
376 388 '''Do the hard work of finding tags. Return a pair of dicts
377 389 (tags, tagtypes) where tags maps tag name to node, and tagtypes
378 390 maps tag name to a string like \'global\' or \'local\'.
379 391 Subclasses or extensions are free to add their own tags, but
380 392 should be aware that the returned dicts will be retained for the
381 393 duration of the localrepo object.'''
382 394
383 395 # XXX what tagtype should subclasses/extensions use? Currently
384 396 # mq and bookmarks add tags, but do not set the tagtype at all.
385 397 # Should each extension invent its own tag type? Should there
386 398 # be one tagtype for all such "virtual" tags? Or is the status
387 399 # quo fine?
388 400
389 401 alltags = {} # map tag name to (node, hist)
390 402 tagtypes = {}
391 403
392 404 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
393 405 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
394 406
395 407 # Build the return dicts. Have to re-encode tag names because
396 408 # the tags module always uses UTF-8 (in order not to lose info
397 409 # writing to the cache), but the rest of Mercurial wants them in
398 410 # local encoding.
399 411 tags = {}
400 412 for (name, (node, hist)) in alltags.iteritems():
401 413 if node != nullid:
402 414 try:
403 415 # ignore tags to unknown nodes
404 416 self.changelog.lookup(node)
405 417 tags[encoding.tolocal(name)] = node
406 418 except error.LookupError:
407 419 pass
408 420 tags['tip'] = self.changelog.tip()
409 421 tagtypes = dict([(encoding.tolocal(name), value)
410 422 for (name, value) in tagtypes.iteritems()])
411 423 return (tags, tagtypes)
412 424
413 425 def tagtype(self, tagname):
414 426 '''
415 427 return the type of the given tag. result can be:
416 428
417 429 'local' : a local tag
418 430 'global' : a global tag
419 431 None : tag does not exist
420 432 '''
421 433
422 434 return self._tagscache.tagtypes.get(tagname)
423 435
424 436 def tagslist(self):
425 437 '''return a list of tags ordered by revision'''
426 438 if not self._tagscache.tagslist:
427 439 l = []
428 440 for t, n in self.tags().iteritems():
429 441 r = self.changelog.rev(n)
430 442 l.append((r, t, n))
431 443 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
432 444
433 445 return self._tagscache.tagslist
434 446
435 447 def nodetags(self, node):
436 448 '''return the tags associated with a node'''
437 449 if not self._tagscache.nodetagscache:
438 450 nodetagscache = {}
439 451 for t, n in self.tags().iteritems():
440 452 nodetagscache.setdefault(n, []).append(t)
441 453 for tags in nodetagscache.itervalues():
442 454 tags.sort()
443 455 self._tagscache.nodetagscache = nodetagscache
444 456 return self._tagscache.nodetagscache.get(node, [])
445 457
446 458 def nodebookmarks(self, node):
447 459 marks = []
448 460 for bookmark, n in self._bookmarks.iteritems():
449 461 if n == node:
450 462 marks.append(bookmark)
451 463 return sorted(marks)
452 464
453 465 def _branchtags(self, partial, lrev):
454 466 # TODO: rename this function?
455 467 tiprev = len(self) - 1
456 468 if lrev != tiprev:
457 469 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
458 470 self._updatebranchcache(partial, ctxgen)
459 471 self._writebranchcache(partial, self.changelog.tip(), tiprev)
460 472
461 473 return partial
462 474
463 475 def updatebranchcache(self):
464 476 tip = self.changelog.tip()
465 477 if self._branchcache is not None and self._branchcachetip == tip:
466 478 return self._branchcache
467 479
468 480 oldtip = self._branchcachetip
469 481 self._branchcachetip = tip
470 482 if oldtip is None or oldtip not in self.changelog.nodemap:
471 483 partial, last, lrev = self._readbranchcache()
472 484 else:
473 485 lrev = self.changelog.rev(oldtip)
474 486 partial = self._branchcache
475 487
476 488 self._branchtags(partial, lrev)
477 489 # this private cache holds all heads (not just tips)
478 490 self._branchcache = partial
479 491
480 492 def branchmap(self):
481 493 '''returns a dictionary {branch: [branchheads]}'''
482 494 self.updatebranchcache()
483 495 return self._branchcache
484 496
485 497 def branchtags(self):
486 498 '''return a dict where branch names map to the tipmost head of
487 499 the branch, open heads come before closed'''
488 500 bt = {}
489 501 for bn, heads in self.branchmap().iteritems():
490 502 tip = heads[-1]
491 503 for h in reversed(heads):
492 504 if 'close' not in self.changelog.read(h)[5]:
493 505 tip = h
494 506 break
495 507 bt[bn] = tip
496 508 return bt
497 509
498 510 def _readbranchcache(self):
499 511 partial = {}
500 512 try:
501 513 f = self.opener("cache/branchheads")
502 514 lines = f.read().split('\n')
503 515 f.close()
504 516 except (IOError, OSError):
505 517 return {}, nullid, nullrev
506 518
507 519 try:
508 520 last, lrev = lines.pop(0).split(" ", 1)
509 521 last, lrev = bin(last), int(lrev)
510 522 if lrev >= len(self) or self[lrev].node() != last:
511 523 # invalidate the cache
512 524 raise ValueError('invalidating branch cache (tip differs)')
513 525 for l in lines:
514 526 if not l:
515 527 continue
516 528 node, label = l.split(" ", 1)
517 529 label = encoding.tolocal(label.strip())
518 530 partial.setdefault(label, []).append(bin(node))
519 531 except KeyboardInterrupt:
520 532 raise
521 533 except Exception, inst:
522 534 if self.ui.debugflag:
523 535 self.ui.warn(str(inst), '\n')
524 536 partial, last, lrev = {}, nullid, nullrev
525 537 return partial, last, lrev
526 538
527 539 def _writebranchcache(self, branches, tip, tiprev):
528 540 try:
529 541 f = self.opener("cache/branchheads", "w", atomictemp=True)
530 542 f.write("%s %s\n" % (hex(tip), tiprev))
531 543 for label, nodes in branches.iteritems():
532 544 for node in nodes:
533 545 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
534 546 f.close()
535 547 except (IOError, OSError):
536 548 pass
537 549
538 550 def _updatebranchcache(self, partial, ctxgen):
539 551 # collect new branch entries
540 552 newbranches = {}
541 553 for c in ctxgen:
542 554 newbranches.setdefault(c.branch(), []).append(c.node())
543 555 # if older branchheads are reachable from new ones, they aren't
544 556 # really branchheads. Note checking parents is insufficient:
545 557 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
546 558 for branch, newnodes in newbranches.iteritems():
547 559 bheads = partial.setdefault(branch, [])
548 560 bheads.extend(newnodes)
549 561 if len(bheads) <= 1:
550 562 continue
551 563 bheads = sorted(bheads, key=lambda x: self[x].rev())
552 564 # starting from tip means fewer passes over reachable
553 565 while newnodes:
554 566 latest = newnodes.pop()
555 567 if latest not in bheads:
556 568 continue
557 569 minbhrev = self[bheads[0]].node()
558 570 reachable = self.changelog.reachable(latest, minbhrev)
559 571 reachable.remove(latest)
560 572 if reachable:
561 573 bheads = [b for b in bheads if b not in reachable]
562 574 partial[branch] = bheads
563 575
564 576 def lookup(self, key):
565 577 if isinstance(key, int):
566 578 return self.changelog.node(key)
567 579 elif key == '.':
568 580 return self.dirstate.p1()
569 581 elif key == 'null':
570 582 return nullid
571 583 elif key == 'tip':
572 584 return self.changelog.tip()
573 585 n = self.changelog._match(key)
574 586 if n:
575 587 return n
576 588 if key in self._bookmarks:
577 589 return self._bookmarks[key]
578 590 if key in self.tags():
579 591 return self.tags()[key]
580 592 if key in self.branchtags():
581 593 return self.branchtags()[key]
582 594 n = self.changelog._partialmatch(key)
583 595 if n:
584 596 return n
585 597
586 598 # can't find key, check if it might have come from damaged dirstate
587 599 if key in self.dirstate.parents():
588 600 raise error.Abort(_("working directory has unknown parent '%s'!")
589 601 % short(key))
590 602 try:
591 603 if len(key) == 20:
592 604 key = hex(key)
593 605 except TypeError:
594 606 pass
595 607 raise error.RepoLookupError(_("unknown revision '%s'") % key)
596 608
597 609 def lookupbranch(self, key, remote=None):
598 610 repo = remote or self
599 611 if key in repo.branchmap():
600 612 return key
601 613
602 614 repo = (remote and remote.local()) and remote or self
603 615 return repo[key].branch()
604 616
605 617 def known(self, nodes):
606 618 nm = self.changelog.nodemap
607 619 return [(n in nm) for n in nodes]
608 620
609 621 def local(self):
610 622 return self
611 623
612 624 def join(self, f):
613 625 return os.path.join(self.path, f)
614 626
615 627 def wjoin(self, f):
616 628 return os.path.join(self.root, f)
617 629
618 630 def file(self, f):
619 631 if f[0] == '/':
620 632 f = f[1:]
621 633 return filelog.filelog(self.sopener, f)
622 634
623 635 def changectx(self, changeid):
624 636 return self[changeid]
625 637
626 638 def parents(self, changeid=None):
627 639 '''get list of changectxs for parents of changeid'''
628 640 return self[changeid].parents()
629 641
630 642 def filectx(self, path, changeid=None, fileid=None):
631 643 """changeid can be a changeset revision, node, or tag.
632 644 fileid can be a file revision or node."""
633 645 return context.filectx(self, path, changeid, fileid)
634 646
635 647 def getcwd(self):
636 648 return self.dirstate.getcwd()
637 649
638 650 def pathto(self, f, cwd=None):
639 651 return self.dirstate.pathto(f, cwd)
640 652
641 653 def wfile(self, f, mode='r'):
642 654 return self.wopener(f, mode)
643 655
644 656 def _link(self, f):
645 657 return os.path.islink(self.wjoin(f))
646 658
647 659 def _loadfilter(self, filter):
648 660 if filter not in self.filterpats:
649 661 l = []
650 662 for pat, cmd in self.ui.configitems(filter):
651 663 if cmd == '!':
652 664 continue
653 665 mf = matchmod.match(self.root, '', [pat])
654 666 fn = None
655 667 params = cmd
656 668 for name, filterfn in self._datafilters.iteritems():
657 669 if cmd.startswith(name):
658 670 fn = filterfn
659 671 params = cmd[len(name):].lstrip()
660 672 break
661 673 if not fn:
662 674 fn = lambda s, c, **kwargs: util.filter(s, c)
663 675 # Wrap old filters not supporting keyword arguments
664 676 if not inspect.getargspec(fn)[2]:
665 677 oldfn = fn
666 678 fn = lambda s, c, **kwargs: oldfn(s, c)
667 679 l.append((mf, fn, params))
668 680 self.filterpats[filter] = l
669 681 return self.filterpats[filter]
670 682
671 683 def _filter(self, filterpats, filename, data):
672 684 for mf, fn, cmd in filterpats:
673 685 if mf(filename):
674 686 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
675 687 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
676 688 break
677 689
678 690 return data
679 691
680 692 @propertycache
681 693 def _encodefilterpats(self):
682 694 return self._loadfilter('encode')
683 695
684 696 @propertycache
685 697 def _decodefilterpats(self):
686 698 return self._loadfilter('decode')
687 699
688 700 def adddatafilter(self, name, filter):
689 701 self._datafilters[name] = filter
690 702
691 703 def wread(self, filename):
692 704 if self._link(filename):
693 705 data = os.readlink(self.wjoin(filename))
694 706 else:
695 707 data = self.wopener.read(filename)
696 708 return self._filter(self._encodefilterpats, filename, data)
697 709
698 710 def wwrite(self, filename, data, flags):
699 711 data = self._filter(self._decodefilterpats, filename, data)
700 712 if 'l' in flags:
701 713 self.wopener.symlink(data, filename)
702 714 else:
703 715 self.wopener.write(filename, data)
704 716 if 'x' in flags:
705 717 util.setflags(self.wjoin(filename), False, True)
706 718
707 719 def wwritedata(self, filename, data):
708 720 return self._filter(self._decodefilterpats, filename, data)
709 721
710 722 def transaction(self, desc):
711 723 tr = self._transref and self._transref() or None
712 724 if tr and tr.running():
713 725 return tr.nest()
714 726
715 727 # abort here if the journal already exists
716 728 if os.path.exists(self.sjoin("journal")):
717 729 raise error.RepoError(
718 730 _("abandoned transaction found - run hg recover"))
719 731
720 732 journalfiles = self._writejournal(desc)
721 733 renames = [(x, undoname(x)) for x in journalfiles]
722 734
723 735 tr = transaction.transaction(self.ui.warn, self.sopener,
724 736 self.sjoin("journal"),
725 737 aftertrans(renames),
726 738 self.store.createmode)
727 739 self._transref = weakref.ref(tr)
728 740 return tr
729 741
730 742 def _writejournal(self, desc):
731 743 # save dirstate for rollback
732 744 try:
733 745 ds = self.opener.read("dirstate")
734 746 except IOError:
735 747 ds = ""
736 748 self.opener.write("journal.dirstate", ds)
737 749 self.opener.write("journal.branch",
738 750 encoding.fromlocal(self.dirstate.branch()))
739 751 self.opener.write("journal.desc",
740 752 "%d\n%s\n" % (len(self), desc))
741 753
742 754 bkname = self.join('bookmarks')
743 755 if os.path.exists(bkname):
744 756 util.copyfile(bkname, self.join('journal.bookmarks'))
745 757 else:
746 758 self.opener.write('journal.bookmarks', '')
747 759
748 760 return (self.sjoin('journal'), self.join('journal.dirstate'),
749 761 self.join('journal.branch'), self.join('journal.desc'),
750 762 self.join('journal.bookmarks'))
751 763
752 764 def recover(self):
753 765 lock = self.lock()
754 766 try:
755 767 if os.path.exists(self.sjoin("journal")):
756 768 self.ui.status(_("rolling back interrupted transaction\n"))
757 769 transaction.rollback(self.sopener, self.sjoin("journal"),
758 770 self.ui.warn)
759 771 self.invalidate()
760 772 return True
761 773 else:
762 774 self.ui.warn(_("no interrupted transaction available\n"))
763 775 return False
764 776 finally:
765 777 lock.release()
766 778
767 779 def rollback(self, dryrun=False, force=False):
768 780 wlock = lock = None
769 781 try:
770 782 wlock = self.wlock()
771 783 lock = self.lock()
772 784 if os.path.exists(self.sjoin("undo")):
773 785 return self._rollback(dryrun, force)
774 786 else:
775 787 self.ui.warn(_("no rollback information available\n"))
776 788 return 1
777 789 finally:
778 790 release(lock, wlock)
779 791
780 792 def _rollback(self, dryrun, force):
781 793 ui = self.ui
782 794 try:
783 795 args = self.opener.read('undo.desc').splitlines()
784 796 (oldlen, desc, detail) = (int(args[0]), args[1], None)
785 797 if len(args) >= 3:
786 798 detail = args[2]
787 799 oldtip = oldlen - 1
788 800
789 801 if detail and ui.verbose:
790 802 msg = (_('repository tip rolled back to revision %s'
791 803 ' (undo %s: %s)\n')
792 804 % (oldtip, desc, detail))
793 805 else:
794 806 msg = (_('repository tip rolled back to revision %s'
795 807 ' (undo %s)\n')
796 808 % (oldtip, desc))
797 809 except IOError:
798 810 msg = _('rolling back unknown transaction\n')
799 811 desc = None
800 812
801 813 if not force and self['.'] != self['tip'] and desc == 'commit':
802 814 raise util.Abort(
803 815 _('rollback of last commit while not checked out '
804 816 'may lose data'), hint=_('use -f to force'))
805 817
806 818 ui.status(msg)
807 819 if dryrun:
808 820 return 0
809 821
810 822 parents = self.dirstate.parents()
811 823 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
812 824 if os.path.exists(self.join('undo.bookmarks')):
813 825 util.rename(self.join('undo.bookmarks'),
814 826 self.join('bookmarks'))
815 827 self.invalidate()
816 828
817 829 parentgone = (parents[0] not in self.changelog.nodemap or
818 830 parents[1] not in self.changelog.nodemap)
819 831 if parentgone:
820 832 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
821 833 try:
822 834 branch = self.opener.read('undo.branch')
823 835 self.dirstate.setbranch(branch)
824 836 except IOError:
825 837 ui.warn(_('named branch could not be reset: '
826 838 'current branch is still \'%s\'\n')
827 839 % self.dirstate.branch())
828 840
829 841 self.dirstate.invalidate()
830 842 self.destroyed()
831 843 parents = tuple([p.rev() for p in self.parents()])
832 844 if len(parents) > 1:
833 845 ui.status(_('working directory now based on '
834 846 'revisions %d and %d\n') % parents)
835 847 else:
836 848 ui.status(_('working directory now based on '
837 849 'revision %d\n') % parents)
838 850 return 0
839 851
840 852 def invalidatecaches(self):
841 853 try:
842 854 delattr(self, '_tagscache')
843 855 except AttributeError:
844 856 pass
845 857
846 858 self._branchcache = None # in UTF-8
847 859 self._branchcachetip = None
848 860
849 861 def invalidatedirstate(self):
850 862 '''Invalidates the dirstate, causing the next call to dirstate
851 863 to check if it was modified since the last time it was read,
852 864 rereading it if it has.
853 865
854 866 This is different from dirstate.invalidate() in that it doesn't
855 867 always reread the dirstate. Use dirstate.invalidate() if you want to
856 868 explicitly read the dirstate again (i.e. restoring it to a previous
857 869 known good state).'''
858 870 try:
859 871 delattr(self, 'dirstate')
860 872 except AttributeError:
861 873 pass
862 874
863 875 def invalidate(self):
864 876 for k in self._filecache:
865 877 # dirstate is invalidated separately in invalidatedirstate()
866 878 if k == 'dirstate':
867 879 continue
868 880
869 881 try:
870 882 delattr(self, k)
871 883 except AttributeError:
872 884 pass
873 885 self.invalidatecaches()
874 886
875 887 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
876 888 try:
877 889 l = lock.lock(lockname, 0, releasefn, desc=desc)
878 890 except error.LockHeld, inst:
879 891 if not wait:
880 892 raise
881 893 self.ui.warn(_("waiting for lock on %s held by %r\n") %
882 894 (desc, inst.locker))
883 895 # default to 600 seconds timeout
884 896 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
885 897 releasefn, desc=desc)
886 898 if acquirefn:
887 899 acquirefn()
888 900 return l
889 901
890 902 def lock(self, wait=True):
891 903 '''Lock the repository store (.hg/store) and return a weak reference
892 904 to the lock. Use this before modifying the store (e.g. committing or
893 905 stripping). If you are opening a transaction, get a lock as well.'''
894 906 l = self._lockref and self._lockref()
895 907 if l is not None and l.held:
896 908 l.lock()
897 909 return l
898 910
899 911 def unlock():
900 912 self.store.write()
901 913 for k, ce in self._filecache.items():
902 914 if k == 'dirstate':
903 915 continue
904 916 ce.refresh()
905 917
906 918 l = self._lock(self.sjoin("lock"), wait, unlock,
907 919 self.invalidate, _('repository %s') % self.origroot)
908 920 self._lockref = weakref.ref(l)
909 921 return l
910 922
911 923 def wlock(self, wait=True):
912 924 '''Lock the non-store parts of the repository (everything under
913 925 .hg except .hg/store) and return a weak reference to the lock.
914 926 Use this before modifying files in .hg.'''
915 927 l = self._wlockref and self._wlockref()
916 928 if l is not None and l.held:
917 929 l.lock()
918 930 return l
919 931
920 932 def unlock():
921 933 self.dirstate.write()
922 934 ce = self._filecache.get('dirstate')
923 935 if ce:
924 936 ce.refresh()
925 937
926 938 l = self._lock(self.join("wlock"), wait, unlock,
927 939 self.invalidatedirstate, _('working directory of %s') %
928 940 self.origroot)
929 941 self._wlockref = weakref.ref(l)
930 942 return l
931 943
932 944 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
933 945 """
934 946 commit an individual file as part of a larger transaction
935 947 """
936 948
937 949 fname = fctx.path()
938 950 text = fctx.data()
939 951 flog = self.file(fname)
940 952 fparent1 = manifest1.get(fname, nullid)
941 953 fparent2 = fparent2o = manifest2.get(fname, nullid)
942 954
943 955 meta = {}
944 956 copy = fctx.renamed()
945 957 if copy and copy[0] != fname:
946 958 # Mark the new revision of this file as a copy of another
947 959 # file. This copy data will effectively act as a parent
948 960 # of this new revision. If this is a merge, the first
949 961 # parent will be the nullid (meaning "look up the copy data")
950 962 # and the second one will be the other parent. For example:
951 963 #
952 964 # 0 --- 1 --- 3 rev1 changes file foo
953 965 # \ / rev2 renames foo to bar and changes it
954 966 # \- 2 -/ rev3 should have bar with all changes and
955 967 # should record that bar descends from
956 968 # bar in rev2 and foo in rev1
957 969 #
958 970 # this allows this merge to succeed:
959 971 #
960 972 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
961 973 # \ / merging rev3 and rev4 should use bar@rev2
962 974 # \- 2 --- 4 as the merge base
963 975 #
964 976
965 977 cfname = copy[0]
966 978 crev = manifest1.get(cfname)
967 979 newfparent = fparent2
968 980
969 981 if manifest2: # branch merge
970 982 if fparent2 == nullid or crev is None: # copied on remote side
971 983 if cfname in manifest2:
972 984 crev = manifest2[cfname]
973 985 newfparent = fparent1
974 986
975 987 # find source in nearest ancestor if we've lost track
976 988 if not crev:
977 989 self.ui.debug(" %s: searching for copy revision for %s\n" %
978 990 (fname, cfname))
979 991 for ancestor in self[None].ancestors():
980 992 if cfname in ancestor:
981 993 crev = ancestor[cfname].filenode()
982 994 break
983 995
984 996 if crev:
985 997 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
986 998 meta["copy"] = cfname
987 999 meta["copyrev"] = hex(crev)
988 1000 fparent1, fparent2 = nullid, newfparent
989 1001 else:
990 1002 self.ui.warn(_("warning: can't find ancestor for '%s' "
991 1003 "copied from '%s'!\n") % (fname, cfname))
992 1004
993 1005 elif fparent2 != nullid:
994 1006 # is one parent an ancestor of the other?
995 1007 fparentancestor = flog.ancestor(fparent1, fparent2)
996 1008 if fparentancestor == fparent1:
997 1009 fparent1, fparent2 = fparent2, nullid
998 1010 elif fparentancestor == fparent2:
999 1011 fparent2 = nullid
1000 1012
1001 1013 # is the file changed?
1002 1014 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1003 1015 changelist.append(fname)
1004 1016 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1005 1017
1006 1018 # are just the flags changed during merge?
1007 1019 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1008 1020 changelist.append(fname)
1009 1021
1010 1022 return fparent1
1011 1023
1012 1024 def commit(self, text="", user=None, date=None, match=None, force=False,
1013 1025 editor=False, extra={}):
1014 1026 """Add a new revision to current repository.
1015 1027
1016 1028 Revision information is gathered from the working directory;
1017 1029 match can be used to filter the committed files. If editor is
1018 1030 supplied, it is called to get a commit message.
1019 1031 """
1020 1032
1021 1033 def fail(f, msg):
1022 1034 raise util.Abort('%s: %s' % (f, msg))
1023 1035
1024 1036 if not match:
1025 1037 match = matchmod.always(self.root, '')
1026 1038
1027 1039 if not force:
1028 1040 vdirs = []
1029 1041 match.dir = vdirs.append
1030 1042 match.bad = fail
1031 1043
1032 1044 wlock = self.wlock()
1033 1045 try:
1034 1046 wctx = self[None]
1035 1047 merge = len(wctx.parents()) > 1
1036 1048
1037 1049 if (not force and merge and match and
1038 1050 (match.files() or match.anypats())):
1039 1051 raise util.Abort(_('cannot partially commit a merge '
1040 1052 '(do not specify files or patterns)'))
1041 1053
1042 1054 changes = self.status(match=match, clean=force)
1043 1055 if force:
1044 1056 changes[0].extend(changes[6]) # mq may commit unchanged files
1045 1057
1046 1058 # check subrepos
1047 1059 subs = []
1048 1060 removedsubs = set()
1049 1061 if '.hgsub' in wctx:
1050 1062 # only manage subrepos and .hgsubstate if .hgsub is present
1051 1063 for p in wctx.parents():
1052 1064 removedsubs.update(s for s in p.substate if match(s))
1053 1065 for s in wctx.substate:
1054 1066 removedsubs.discard(s)
1055 1067 if match(s) and wctx.sub(s).dirty():
1056 1068 subs.append(s)
1057 1069 if (subs or removedsubs):
1058 1070 if (not match('.hgsub') and
1059 1071 '.hgsub' in (wctx.modified() + wctx.added())):
1060 1072 raise util.Abort(
1061 1073 _("can't commit subrepos without .hgsub"))
1062 1074 if '.hgsubstate' not in changes[0]:
1063 1075 changes[0].insert(0, '.hgsubstate')
1064 1076 if '.hgsubstate' in changes[2]:
1065 1077 changes[2].remove('.hgsubstate')
1066 1078 elif '.hgsub' in changes[2]:
1067 1079 # clean up .hgsubstate when .hgsub is removed
1068 1080 if ('.hgsubstate' in wctx and
1069 1081 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1070 1082 changes[2].insert(0, '.hgsubstate')
1071 1083
1072 1084 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1073 1085 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1074 1086 if changedsubs:
1075 1087 raise util.Abort(_("uncommitted changes in subrepo %s")
1076 1088 % changedsubs[0],
1077 1089 hint=_("use --subrepos for recursive commit"))
1078 1090
1079 1091 # make sure all explicit patterns are matched
1080 1092 if not force and match.files():
1081 1093 matched = set(changes[0] + changes[1] + changes[2])
1082 1094
1083 1095 for f in match.files():
1084 1096 if f == '.' or f in matched or f in wctx.substate:
1085 1097 continue
1086 1098 if f in changes[3]: # missing
1087 1099 fail(f, _('file not found!'))
1088 1100 if f in vdirs: # visited directory
1089 1101 d = f + '/'
1090 1102 for mf in matched:
1091 1103 if mf.startswith(d):
1092 1104 break
1093 1105 else:
1094 1106 fail(f, _("no match under directory!"))
1095 1107 elif f not in self.dirstate:
1096 1108 fail(f, _("file not tracked!"))
1097 1109
1098 1110 if (not force and not extra.get("close") and not merge
1099 1111 and not (changes[0] or changes[1] or changes[2])
1100 1112 and wctx.branch() == wctx.p1().branch()):
1101 1113 return None
1102 1114
1103 1115 ms = mergemod.mergestate(self)
1104 1116 for f in changes[0]:
1105 1117 if f in ms and ms[f] == 'u':
1106 1118 raise util.Abort(_("unresolved merge conflicts "
1107 1119 "(see hg help resolve)"))
1108 1120
1109 1121 cctx = context.workingctx(self, text, user, date, extra, changes)
1110 1122 if editor:
1111 1123 cctx._text = editor(self, cctx, subs)
1112 1124 edited = (text != cctx._text)
1113 1125
1114 1126 # commit subs
1115 1127 if subs or removedsubs:
1116 1128 state = wctx.substate.copy()
1117 1129 for s in sorted(subs):
1118 1130 sub = wctx.sub(s)
1119 1131 self.ui.status(_('committing subrepository %s\n') %
1120 1132 subrepo.subrelpath(sub))
1121 1133 sr = sub.commit(cctx._text, user, date)
1122 1134 state[s] = (state[s][0], sr)
1123 1135 subrepo.writestate(self, state)
1124 1136
1125 1137 # Save commit message in case this transaction gets rolled back
1126 1138 # (e.g. by a pretxncommit hook). Leave the content alone on
1127 1139 # the assumption that the user will use the same editor again.
1128 1140 msgfn = self.savecommitmessage(cctx._text)
1129 1141
1130 1142 p1, p2 = self.dirstate.parents()
1131 1143 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1132 1144 try:
1133 1145 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1134 1146 ret = self.commitctx(cctx, True)
1135 1147 except:
1136 1148 if edited:
1137 1149 self.ui.write(
1138 1150 _('note: commit message saved in %s\n') % msgfn)
1139 1151 raise
1140 1152
1141 1153 # update bookmarks, dirstate and mergestate
1142 1154 bookmarks.update(self, p1, ret)
1143 1155 for f in changes[0] + changes[1]:
1144 1156 self.dirstate.normal(f)
1145 1157 for f in changes[2]:
1146 1158 self.dirstate.drop(f)
1147 1159 self.dirstate.setparents(ret)
1148 1160 ms.reset()
1149 1161 finally:
1150 1162 wlock.release()
1151 1163
1152 1164 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1153 1165 return ret
1154 1166
1155 1167 def commitctx(self, ctx, error=False):
1156 1168 """Add a new revision to current repository.
1157 1169 Revision information is passed via the context argument.
1158 1170 """
1159 1171
1160 1172 tr = lock = None
1161 1173 removed = list(ctx.removed())
1162 1174 p1, p2 = ctx.p1(), ctx.p2()
1163 1175 user = ctx.user()
1164 1176
1165 1177 lock = self.lock()
1166 1178 try:
1167 1179 tr = self.transaction("commit")
1168 1180 trp = weakref.proxy(tr)
1169 1181
1170 1182 if ctx.files():
1171 1183 m1 = p1.manifest().copy()
1172 1184 m2 = p2.manifest()
1173 1185
1174 1186 # check in files
1175 1187 new = {}
1176 1188 changed = []
1177 1189 linkrev = len(self)
1178 1190 for f in sorted(ctx.modified() + ctx.added()):
1179 1191 self.ui.note(f + "\n")
1180 1192 try:
1181 1193 fctx = ctx[f]
1182 1194 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1183 1195 changed)
1184 1196 m1.set(f, fctx.flags())
1185 1197 except OSError, inst:
1186 1198 self.ui.warn(_("trouble committing %s!\n") % f)
1187 1199 raise
1188 1200 except IOError, inst:
1189 1201 errcode = getattr(inst, 'errno', errno.ENOENT)
1190 1202 if error or errcode and errcode != errno.ENOENT:
1191 1203 self.ui.warn(_("trouble committing %s!\n") % f)
1192 1204 raise
1193 1205 else:
1194 1206 removed.append(f)
1195 1207
1196 1208 # update manifest
1197 1209 m1.update(new)
1198 1210 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1199 1211 drop = [f for f in removed if f in m1]
1200 1212 for f in drop:
1201 1213 del m1[f]
1202 1214 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1203 1215 p2.manifestnode(), (new, drop))
1204 1216 files = changed + removed
1205 1217 else:
1206 1218 mn = p1.manifestnode()
1207 1219 files = []
1208 1220
1209 1221 # update changelog
1210 1222 self.changelog.delayupdate()
1211 1223 n = self.changelog.add(mn, files, ctx.description(),
1212 1224 trp, p1.node(), p2.node(),
1213 1225 user, ctx.date(), ctx.extra().copy())
1214 1226 p = lambda: self.changelog.writepending() and self.root or ""
1215 1227 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1216 1228 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1217 1229 parent2=xp2, pending=p)
1218 1230 self.changelog.finalize(trp)
1219 1231 tr.close()
1220 1232
1221 1233 if self._branchcache:
1222 1234 self.updatebranchcache()
1223 1235 return n
1224 1236 finally:
1225 1237 if tr:
1226 1238 tr.release()
1227 1239 lock.release()
1228 1240
1229 1241 def destroyed(self):
1230 1242 '''Inform the repository that nodes have been destroyed.
1231 1243 Intended for use by strip and rollback, so there's a common
1232 1244 place for anything that has to be done after destroying history.'''
1233 1245 # XXX it might be nice if we could take the list of destroyed
1234 1246 # nodes, but I don't see an easy way for rollback() to do that
1235 1247
1236 1248 # Ensure the persistent tag cache is updated. Doing it now
1237 1249 # means that the tag cache only has to worry about destroyed
1238 1250 # heads immediately after a strip/rollback. That in turn
1239 1251 # guarantees that "cachetip == currenttip" (comparing both rev
1240 1252 # and node) always means no nodes have been added or destroyed.
1241 1253
1242 1254 # XXX this is suboptimal when qrefresh'ing: we strip the current
1243 1255 # head, refresh the tag cache, then immediately add a new head.
1244 1256 # But I think doing it this way is necessary for the "instant
1245 1257 # tag cache retrieval" case to work.
1246 1258 self.invalidatecaches()
1247 1259
1248 1260 def walk(self, match, node=None):
1249 1261 '''
1250 1262 walk recursively through the directory tree or a given
1251 1263 changeset, finding all files matched by the match
1252 1264 function
1253 1265 '''
1254 1266 return self[node].walk(match)
1255 1267
1256 1268 def status(self, node1='.', node2=None, match=None,
1257 1269 ignored=False, clean=False, unknown=False,
1258 1270 listsubrepos=False):
1259 1271 """return status of files between two nodes or node and working directory
1260 1272
1261 1273 If node1 is None, use the first dirstate parent instead.
1262 1274 If node2 is None, compare node1 with working directory.
1263 1275 """
1264 1276
1265 1277 def mfmatches(ctx):
1266 1278 mf = ctx.manifest().copy()
1267 1279 for fn in mf.keys():
1268 1280 if not match(fn):
1269 1281 del mf[fn]
1270 1282 return mf
1271 1283
1272 1284 if isinstance(node1, context.changectx):
1273 1285 ctx1 = node1
1274 1286 else:
1275 1287 ctx1 = self[node1]
1276 1288 if isinstance(node2, context.changectx):
1277 1289 ctx2 = node2
1278 1290 else:
1279 1291 ctx2 = self[node2]
1280 1292
1281 1293 working = ctx2.rev() is None
1282 1294 parentworking = working and ctx1 == self['.']
1283 1295 match = match or matchmod.always(self.root, self.getcwd())
1284 1296 listignored, listclean, listunknown = ignored, clean, unknown
1285 1297
1286 1298 # load earliest manifest first for caching reasons
1287 1299 if not working and ctx2.rev() < ctx1.rev():
1288 1300 ctx2.manifest()
1289 1301
1290 1302 if not parentworking:
1291 1303 def bad(f, msg):
1292 1304 if f not in ctx1:
1293 1305 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1294 1306 match.bad = bad
1295 1307
1296 1308 if working: # we need to scan the working dir
1297 1309 subrepos = []
1298 1310 if '.hgsub' in self.dirstate:
1299 1311 subrepos = ctx2.substate.keys()
1300 1312 s = self.dirstate.status(match, subrepos, listignored,
1301 1313 listclean, listunknown)
1302 1314 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1303 1315
1304 1316 # check for any possibly clean files
1305 1317 if parentworking and cmp:
1306 1318 fixup = []
1307 1319 # do a full compare of any files that might have changed
1308 1320 for f in sorted(cmp):
1309 1321 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1310 1322 or ctx1[f].cmp(ctx2[f])):
1311 1323 modified.append(f)
1312 1324 else:
1313 1325 fixup.append(f)
1314 1326
1315 1327 # update dirstate for files that are actually clean
1316 1328 if fixup:
1317 1329 if listclean:
1318 1330 clean += fixup
1319 1331
1320 1332 try:
1321 1333 # updating the dirstate is optional
1322 1334 # so we don't wait on the lock
1323 1335 wlock = self.wlock(False)
1324 1336 try:
1325 1337 for f in fixup:
1326 1338 self.dirstate.normal(f)
1327 1339 finally:
1328 1340 wlock.release()
1329 1341 except error.LockError:
1330 1342 pass
1331 1343
1332 1344 if not parentworking:
1333 1345 mf1 = mfmatches(ctx1)
1334 1346 if working:
1335 1347 # we are comparing working dir against non-parent
1336 1348 # generate a pseudo-manifest for the working dir
1337 1349 mf2 = mfmatches(self['.'])
1338 1350 for f in cmp + modified + added:
1339 1351 mf2[f] = None
1340 1352 mf2.set(f, ctx2.flags(f))
1341 1353 for f in removed:
1342 1354 if f in mf2:
1343 1355 del mf2[f]
1344 1356 else:
1345 1357 # we are comparing two revisions
1346 1358 deleted, unknown, ignored = [], [], []
1347 1359 mf2 = mfmatches(ctx2)
1348 1360
1349 1361 modified, added, clean = [], [], []
1350 1362 for fn in mf2:
1351 1363 if fn in mf1:
1352 1364 if (fn not in deleted and
1353 1365 (mf1.flags(fn) != mf2.flags(fn) or
1354 1366 (mf1[fn] != mf2[fn] and
1355 1367 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1356 1368 modified.append(fn)
1357 1369 elif listclean:
1358 1370 clean.append(fn)
1359 1371 del mf1[fn]
1360 1372 elif fn not in deleted:
1361 1373 added.append(fn)
1362 1374 removed = mf1.keys()
1363 1375
1364 1376 if working and modified and not self.dirstate._checklink:
1365 1377 # Symlink placeholders may get non-symlink-like contents
1366 1378 # via user error or dereferencing by NFS or Samba servers,
1367 1379 # so we filter out any placeholders that don't look like a
1368 1380 # symlink
1369 1381 sane = []
1370 1382 for f in modified:
1371 1383 if ctx2.flags(f) == 'l':
1372 1384 d = ctx2[f].data()
1373 1385 if len(d) >= 1024 or '\n' in d or util.binary(d):
1374 1386 self.ui.debug('ignoring suspect symlink placeholder'
1375 1387 ' "%s"\n' % f)
1376 1388 continue
1377 1389 sane.append(f)
1378 1390 modified = sane
1379 1391
1380 1392 r = modified, added, removed, deleted, unknown, ignored, clean
1381 1393
1382 1394 if listsubrepos:
1383 1395 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1384 1396 if working:
1385 1397 rev2 = None
1386 1398 else:
1387 1399 rev2 = ctx2.substate[subpath][1]
1388 1400 try:
1389 1401 submatch = matchmod.narrowmatcher(subpath, match)
1390 1402 s = sub.status(rev2, match=submatch, ignored=listignored,
1391 1403 clean=listclean, unknown=listunknown,
1392 1404 listsubrepos=True)
1393 1405 for rfiles, sfiles in zip(r, s):
1394 1406 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1395 1407 except error.LookupError:
1396 1408 self.ui.status(_("skipping missing subrepository: %s\n")
1397 1409 % subpath)
1398 1410
1399 1411 for l in r:
1400 1412 l.sort()
1401 1413 return r
1402 1414
1403 1415 def heads(self, start=None):
1404 1416 heads = self.changelog.heads(start)
1405 1417 # sort the output in rev descending order
1406 1418 return sorted(heads, key=self.changelog.rev, reverse=True)
1407 1419
1408 1420 def branchheads(self, branch=None, start=None, closed=False):
1409 1421 '''return a (possibly filtered) list of heads for the given branch
1410 1422
1411 1423 Heads are returned in topological order, from newest to oldest.
1412 1424 If branch is None, use the dirstate branch.
1413 1425 If start is not None, return only heads reachable from start.
1414 1426 If closed is True, return heads that are marked as closed as well.
1415 1427 '''
1416 1428 if branch is None:
1417 1429 branch = self[None].branch()
1418 1430 branches = self.branchmap()
1419 1431 if branch not in branches:
1420 1432 return []
1421 1433 # the cache returns heads ordered lowest to highest
1422 1434 bheads = list(reversed(branches[branch]))
1423 1435 if start is not None:
1424 1436 # filter out the heads that cannot be reached from startrev
1425 1437 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1426 1438 bheads = [h for h in bheads if h in fbheads]
1427 1439 if not closed:
1428 1440 bheads = [h for h in bheads if
1429 1441 ('close' not in self.changelog.read(h)[5])]
1430 1442 return bheads
1431 1443
1432 1444 def branches(self, nodes):
1433 1445 if not nodes:
1434 1446 nodes = [self.changelog.tip()]
1435 1447 b = []
1436 1448 for n in nodes:
1437 1449 t = n
1438 1450 while True:
1439 1451 p = self.changelog.parents(n)
1440 1452 if p[1] != nullid or p[0] == nullid:
1441 1453 b.append((t, n, p[0], p[1]))
1442 1454 break
1443 1455 n = p[0]
1444 1456 return b
1445 1457
1446 1458 def between(self, pairs):
1447 1459 r = []
1448 1460
1449 1461 for top, bottom in pairs:
1450 1462 n, l, i = top, [], 0
1451 1463 f = 1
1452 1464
1453 1465 while n != bottom and n != nullid:
1454 1466 p = self.changelog.parents(n)[0]
1455 1467 if i == f:
1456 1468 l.append(n)
1457 1469 f = f * 2
1458 1470 n = p
1459 1471 i += 1
1460 1472
1461 1473 r.append(l)
1462 1474
1463 1475 return r
1464 1476
1465 1477 def pull(self, remote, heads=None, force=False):
1466 1478 lock = self.lock()
1467 1479 try:
1468 1480 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1469 1481 force=force)
1470 1482 common, fetch, rheads = tmp
1471 1483 if not fetch:
1472 1484 self.ui.status(_("no changes found\n"))
1473 1485 result = 0
1474 1486 else:
1475 1487 if heads is None and list(common) == [nullid]:
1476 1488 self.ui.status(_("requesting all changes\n"))
1477 1489 elif heads is None and remote.capable('changegroupsubset'):
1478 1490 # issue1320, avoid a race if remote changed after discovery
1479 1491 heads = rheads
1480 1492
1481 1493 if remote.capable('getbundle'):
1482 1494 cg = remote.getbundle('pull', common=common,
1483 1495 heads=heads or rheads)
1484 1496 elif heads is None:
1485 1497 cg = remote.changegroup(fetch, 'pull')
1486 1498 elif not remote.capable('changegroupsubset'):
1487 1499 raise util.Abort(_("partial pull cannot be done because "
1488 1500 "other repository doesn't support "
1489 1501 "changegroupsubset."))
1490 1502 else:
1491 1503 cg = remote.changegroupsubset(fetch, heads, 'pull')
1492 1504 result = self.addchangegroup(cg, 'pull', remote.url(),
1493 1505 lock=lock)
1494 1506 finally:
1495 1507 lock.release()
1496 1508
1497 1509 return result
1498 1510
1499 1511 def checkpush(self, force, revs):
1500 1512 """Extensions can override this function if additional checks have
1501 1513 to be performed before pushing, or call it if they override push
1502 1514 command.
1503 1515 """
1504 1516 pass
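As a hedged illustration of this extension point (hypothetical extension
code, not part of this change), an extension could wrap checkpush to veto
pushes:

    # hypothetical sketch: refuse to push with uncommitted local changes
    def checkpush(orig, repo, force, revs):
        if not force and any(repo.status()[:4]):
            raise util.Abort('refusing to push with uncommitted changes')
        return orig(repo, force, revs)
    # installed via extensions.wrapfunction(localrepo.localrepository,
    #                                       'checkpush', checkpush)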
1505 1517
1506 1518 def push(self, remote, force=False, revs=None, newbranch=False):
1507 1519 '''Push outgoing changesets (limited by revs) from the current
1508 1520 repository to remote. Return an integer:
1509 1521 - 0 means HTTP error *or* nothing to push
1510 1522 - 1 means we pushed and remote head count is unchanged *or*
1511 1523 we have outgoing changesets but refused to push
1512 1524 - other values as described by addchangegroup()
1513 1525 '''
1514 1526 # there are two ways to push to remote repo:
1515 1527 #
1516 1528 # addchangegroup assumes local user can lock remote
1517 1529 # repo (local filesystem, old ssh servers).
1518 1530 #
1519 1531 # unbundle assumes local user cannot lock remote repo (new ssh
1520 1532 # servers, http servers).
1521 1533
1522 1534 self.checkpush(force, revs)
1523 1535 lock = None
1524 1536 unbundle = remote.capable('unbundle')
1525 1537 if not unbundle:
1526 1538 lock = remote.lock()
1527 1539 try:
1528 1540 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1529 1541 newbranch)
1530 1542 ret = remote_heads
1531 1543 if cg is not None:
1532 1544 if unbundle:
1533 1545 # local repo finds heads on server, finds out what
1534 1546 # revs it must push. once revs transferred, if server
1535 1547 # finds it has different heads (someone else won
1536 1548 # commit/push race), server aborts.
1537 1549 if force:
1538 1550 remote_heads = ['force']
1539 1551 # ssh: return remote's addchangegroup()
1540 1552 # http: return remote's addchangegroup() or 0 for error
1541 1553 ret = remote.unbundle(cg, remote_heads, 'push')
1542 1554 else:
1543 1555 # we return an integer indicating remote head count change
1544 1556 ret = remote.addchangegroup(cg, 'push', self.url(),
1545 1557 lock=lock)
1546 1558 finally:
1547 1559 if lock is not None:
1548 1560 lock.release()
1549 1561
1550 1562 self.ui.debug("checking for updated bookmarks\n")
1551 1563 rb = remote.listkeys('bookmarks')
1552 1564 for k in rb.keys():
1553 1565 if k in self._bookmarks:
1554 1566 nr, nl = rb[k], hex(self._bookmarks[k])
1555 1567 if nr in self:
1556 1568 cr = self[nr]
1557 1569 cl = self[nl]
1558 1570 if cl in cr.descendants():
1559 1571 r = remote.pushkey('bookmarks', k, nr, nl)
1560 1572 if r:
1561 1573 self.ui.status(_("updating bookmark %s\n") % k)
1562 1574 else:
1563 1575 self.ui.warn(_('updating bookmark %s'
1564 1576 ' failed!\n') % k)
1565 1577
1566 1578 return ret
1567 1579
1568 1580 def changegroupinfo(self, nodes, source):
1569 1581 if self.ui.verbose or source == 'bundle':
1570 1582 self.ui.status(_("%d changesets found\n") % len(nodes))
1571 1583 if self.ui.debugflag:
1572 1584 self.ui.debug("list of changesets:\n")
1573 1585 for node in nodes:
1574 1586 self.ui.debug("%s\n" % hex(node))
1575 1587
    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

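    # Selection sketch (added for illustration): with bases B and heads H,
    # nodesbetween() above yields the changesets that are descendants of
    # some b in B and ancestors of some h in H, while 'common' (all
    # ancestors of B) names what the receiver is assumed to already have.
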
    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

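    # Note (added for illustration): findcommonmissing() completes 'common'
    # and computes 'missing' = ancestors(heads) - ancestors(common), so the
    # early 'return None' above lets callers skip building an empty bundle.
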
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}] # mutable cell: [current filename, its filenode map]
        count = [0] # mutable cell so the nested lookup() can update progress

        # can we go through the fast path?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

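    # Note (added for illustration): bundle10 invokes lookup(revlog, node)
    # for every node it serializes and records the returned changelog node
    # as that revision's linknode; the callback above doubles as the
    # collection point for changed files and needed manifest/file nodes.
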
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

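    # Note (added for illustration): since the recipient lacks exactly the
    # changesets in 'revset', this fast path keeps any node whose linkrev is
    # in that set (gennodelst) and recovers each linknode directly with
    # cl.node(revlog.linkrev(...)), with none of the slow path's bookkeeping.
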
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr # the unbundler invokes pr() once per chunk

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles) # from here on only the count is needed
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1

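    # Worked example (added for illustration) of the encoding above: a
    # changegroup adding two new heads gives dh = 2 and returns 3 (dh + 1);
    # one removed head gives dh = -1 and returns -2 (dh - 1); dh = 0 returns
    # 1, leaving 0 to mean "nothing changed or no source".
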
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

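    # Wire format consumed above (reconstructed from the parsing code, for
    # reference):
    #
    #   <resp>\n                        0 ok, 1 forbidden, 2 remote lock failed
    #   <total_files> <total_bytes>\n
    #   <name>\0<size>\n<raw bytes>     repeated total_files times
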
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

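    # Decision sketch (added for illustration) of the negotiation above:
    # stream only when no explicit heads were requested and every format
    # requirement the server advertises ('stream' implies plain revlogv1;
    # 'streamreqs' lists formats explicitly) is one this client supports;
    # anything else falls back to a regular pull.
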
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

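    # Usage sketch (added for illustration; the key and value strings are
    # hypothetical): the bookmark sync in push() above goes through exactly
    # this pair on the peer, e.g.
    #   remote.listkeys('bookmarks')                    -> {'stable': hexnode}
    #   remote.pushkey('bookmarks', 'stable', old, new) -> True on success
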
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

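# Note (added for illustration): the returned closure captures only a plain
# list of (src, dest) tuples, never the transaction object itself, so storing
# it on the transaction creates no reference cycle and __del__-based cleanup
# can still run under CPython's reference counting.
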
def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

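# Example (added for illustration; the path is hypothetical):
#   undoname('.hg/store/journal.phaseroots') -> '.hg/store/undo.phaseroots'
# Only the first 'journal' in the basename is replaced, so names that merely
# contain the word elsewhere are left intact.
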
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True