##// END OF EJS Templates
phases: basic I/O logic...
Pierre-Yves David -
r15418:cf729af2 default
parent child Browse files
Show More
@@ -1,2104 +1,2108 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at *path*, creating it first if *create*.

        Raises error.RepoError when the repository is missing
        (create=False) or already exists (create=True).
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # per-repository config may enable additional extensions
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing or unreadable .hg/hgrc is not fatal
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: treat as an empty requirement set
                requirements = set()

        # honor share-extension indirection: .hg/sharedpath names the
        # .hg directory that actually holds the store
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
112 112
113 113 def _applyrequirements(self, requirements):
114 114 self.requirements = requirements
115 115 openerreqs = set(('revlogv1', 'generaldelta'))
116 116 self.sopener.options = dict((r, 1) for r in requirements
117 117 if r in openerreqs)
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
124 124
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # path itself is a registered subrepo: legal
                    return True
                else:
                    # path lies inside the subrepo; delegate the check
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # drop the last component and retry the shorter prefix
                parts.pop()
        return False
161 161
    @filecache('bookmarks')
    def _bookmarks(self):
        """In-memory bookmark map, read from .hg/bookmarks."""
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        """The active bookmark, read from .hg/bookmarks.current."""
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        # NOTE: bookmarks.write serializes self._bookmarks itself;
        # the *marks* argument is not used here
        bookmarks.write(self)

    @filecache('phaseroots')
    def _phaseroots(self):
        """Phase root nodes, read from .hg/phaseroots (phases module)."""
        return phases.readroots(self)
176
173 177 @filecache('00changelog.i', True)
174 178 def changelog(self):
175 179 c = changelog.changelog(self.sopener)
176 180 if 'HG_PENDING' in os.environ:
177 181 p = os.environ['HG_PENDING']
178 182 if p.startswith(self.root):
179 183 c.readpending('00changelog.i.a')
180 184 return c
181 185
    @filecache('00manifest.i', True)
    def manifest(self):
        """The manifest revlog."""
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        """The working-directory dirstate, with parent validation."""
        warned = [0]  # mutable cell so validate() can warn only once
        def validate(node):
            # map a working-copy parent unknown to the changelog to
            # nullid instead of blowing up
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
201 205
202 206 def __getitem__(self, changeid):
203 207 if changeid is None:
204 208 return context.workingctx(self)
205 209 return context.changectx(self, changeid)
206 210
207 211 def __contains__(self, changeid):
208 212 try:
209 213 return bool(self.lookup(changeid))
210 214 except error.RepoLookupError:
211 215 return False
212 216
213 217 def __nonzero__(self):
214 218 return True
215 219
216 220 def __len__(self):
217 221 return len(self.changelog)
218 222
219 223 def __iter__(self):
220 224 for i in xrange(len(self)):
221 225 yield i
222 226
223 227 def revs(self, expr, *args):
224 228 '''Return a list of revisions matching the given revset'''
225 229 expr = revset.formatspec(expr, *args)
226 230 m = revset.match(None, expr)
227 231 return [r for r in m(self, range(len(self)))]
228 232
229 233 def set(self, expr, *args):
230 234 '''
231 235 Yield a context for each matching revision, after doing arg
232 236 replacement via revset.formatspec
233 237 '''
234 238 for r in self.revs(expr, *args):
235 239 yield self[r]
236 240
237 241 def url(self):
238 242 return 'file:' + self.root
239 243
240 244 def hook(self, name, throw=False, **args):
241 245 return hook.hook(self.ui, self, name, throw, **args)
242 246
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        """Record one or more tags for *node* (backend for tag()).

        Local tags go to .hg/localtags; global tags are appended to
        .hgtags and committed.  Runs the pretag/tag hooks around the
        operation.  Returns the tagging changeset node, or None for
        local tags (which create no changeset).
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append entries, ensuring the previous content ends in \n
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    # record the node previously carrying this tag so
                    # history-aware tag resolution works
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
314 318
315 319 def tag(self, names, node, message, local, user, date):
316 320 '''tag a revision with one or more symbolic names.
317 321
318 322 names is a list of strings or, when adding a single tag, names may be a
319 323 string.
320 324
321 325 if local is True, the tags are stored in a per-repository file.
322 326 otherwise, they are stored in the .hgtags file, and a new
323 327 changeset is committed with the change.
324 328
325 329 keyword arguments:
326 330
327 331 local: whether to store tags in non-version-controlled file
328 332 (default False)
329 333
330 334 message: commit message to use if committing
331 335
332 336 user: name of user to use if committing
333 337
334 338 date: date tuple to use if committing'''
335 339
336 340 if not local:
337 341 for x in self.status()[:5]:
338 342 if '.hgtags' in x:
339 343 raise util.Abort(_('working copy of .hgtags is changed '
340 344 '(please commit .hgtags manually)'))
341 345
342 346 self.tags() # instantiate the cache
343 347 self._tag(names, node, message, local, user, date)
344 348
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # filled lazily by tagslist() and nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
366 370
    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is implicit and always present
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
419 423
420 424 def tagslist(self):
421 425 '''return a list of tags ordered by revision'''
422 426 if not self._tagscache.tagslist:
423 427 l = []
424 428 for t, n in self.tags().iteritems():
425 429 r = self.changelog.rev(n)
426 430 l.append((r, t, n))
427 431 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
428 432
429 433 return self._tagscache.tagslist
430 434
431 435 def nodetags(self, node):
432 436 '''return the tags associated with a node'''
433 437 if not self._tagscache.nodetagscache:
434 438 nodetagscache = {}
435 439 for t, n in self.tags().iteritems():
436 440 nodetagscache.setdefault(n, []).append(t)
437 441 for tags in nodetagscache.itervalues():
438 442 tags.sort()
439 443 self._tagscache.nodetagscache = nodetagscache
440 444 return self._tagscache.nodetagscache.get(node, [])
441 445
442 446 def nodebookmarks(self, node):
443 447 marks = []
444 448 for bookmark, n in self._bookmarks.iteritems():
445 449 if n == node:
446 450 marks.append(bookmark)
447 451 return sorted(marks)
448 452
449 453 def _branchtags(self, partial, lrev):
450 454 # TODO: rename this function?
451 455 tiprev = len(self) - 1
452 456 if lrev != tiprev:
453 457 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
454 458 self._updatebranchcache(partial, ctxgen)
455 459 self._writebranchcache(partial, self.changelog.tip(), tiprev)
456 460
457 461 return partial
458 462
459 463 def updatebranchcache(self):
460 464 tip = self.changelog.tip()
461 465 if self._branchcache is not None and self._branchcachetip == tip:
462 466 return self._branchcache
463 467
464 468 oldtip = self._branchcachetip
465 469 self._branchcachetip = tip
466 470 if oldtip is None or oldtip not in self.changelog.nodemap:
467 471 partial, last, lrev = self._readbranchcache()
468 472 else:
469 473 lrev = self.changelog.rev(oldtip)
470 474 partial = self._branchcache
471 475
472 476 self._branchtags(partial, lrev)
473 477 # this private cache holds all heads (not just tips)
474 478 self._branchcache = partial
475 479
476 480 def branchmap(self):
477 481 '''returns a dictionary {branch: [branchheads]}'''
478 482 self.updatebranchcache()
479 483 return self._branchcache
480 484
481 485 def branchtags(self):
482 486 '''return a dict where branch names map to the tipmost head of
483 487 the branch, open heads come before closed'''
484 488 bt = {}
485 489 for bn, heads in self.branchmap().iteritems():
486 490 tip = heads[-1]
487 491 for h in reversed(heads):
488 492 if 'close' not in self.changelog.read(h)[5]:
489 493 tip = h
490 494 break
491 495 bt[bn] = tip
492 496 return bt
493 497
    def _readbranchcache(self):
        """Parse .hg/cache/branchheads.

        Returns (partial, last, lrev): *partial* maps branch name to a
        list of head nodes, *last*/*lrev* identify the tip the cache
        was valid for.  Any read or validation problem returns an
        empty, invalid cache instead of raising."""
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<node hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # the cache is an optimization only: a corrupt file just
            # means we rebuild from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
522 526
    def _writebranchcache(self, branches, tip, tiprev):
        """Write the branch-heads cache: first line '<tiphex> <tiprev>',
        then one '<nodehex> <branch>' line per head.  I/O errors are
        ignored -- the cache is a pure optimization."""
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
533 537
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changectxs yielded by *ctxgen* into the branch-heads
        map *partial* (modified in place)."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # any head reachable from a newer one is not a head
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
559 563
    def lookup(self, key):
        """Resolve *key* to a binary changelog node.

        *key* may be a revision number, '.', 'null', 'tip', a full
        node, a bookmark, a tag, a branch name, or a hex-node prefix
        (checked in that order).  Raises error.RepoLookupError when
        nothing matches."""
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        # name precedence: bookmarks shadow tags shadow branches
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # render binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
592 596
593 597 def lookupbranch(self, key, remote=None):
594 598 repo = remote or self
595 599 if key in repo.branchmap():
596 600 return key
597 601
598 602 repo = (remote and remote.local()) and remote or self
599 603 return repo[key].branch()
600 604
601 605 def known(self, nodes):
602 606 nm = self.changelog.nodemap
603 607 return [(n in nm) for n in nodes]
604 608
605 609 def local(self):
606 610 return self
607 611
608 612 def join(self, f):
609 613 return os.path.join(self.path, f)
610 614
611 615 def wjoin(self, f):
612 616 return os.path.join(self.root, f)
613 617
614 618 def file(self, f):
615 619 if f[0] == '/':
616 620 f = f[1:]
617 621 return filelog.filelog(self.sopener, f)
618 622
619 623 def changectx(self, changeid):
620 624 return self[changeid]
621 625
622 626 def parents(self, changeid=None):
623 627 '''get list of changectxs for parents of changeid'''
624 628 return self[changeid].parents()
625 629
626 630 def filectx(self, path, changeid=None, fileid=None):
627 631 """changeid can be a changeset revision, node, or tag.
628 632 fileid can be a file revision or node."""
629 633 return context.filectx(self, path, changeid, fileid)
630 634
631 635 def getcwd(self):
632 636 return self.dirstate.getcwd()
633 637
634 638 def pathto(self, f, cwd=None):
635 639 return self.dirstate.pathto(f, cwd)
636 640
637 641 def wfile(self, f, mode='r'):
638 642 return self.wopener(f, mode)
639 643
640 644 def _link(self, f):
641 645 return os.path.islink(self.wjoin(f))
642 646
    def _loadfilter(self, filter):
        """Compile the config section named *filter* ('encode' or
        'decode') into a cached list of (matcher, fn, params) triples."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables an inherited filter for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # a registered in-process data filter
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # default: pipe through an external shell command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
666 670
    def _filter(self, filterpats, filename, data):
        """Run *data* through the first entry of *filterpats* whose
        matcher accepts *filename*; unmatched data passes unchanged."""
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        # compiled [encode] filters (working dir -> repository)
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        # compiled [decode] filters (repository -> working dir)
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        """Register an in-process data filter callable under *name*."""
        self._datafilters[name] = filter

    def wread(self, filename):
        """Read working-directory *filename* through the encode
        filters; a symlink reads as its target string."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """Write decode-filtered *data* to the working directory,
        honoring the 'l' (symlink) and 'x' (executable) flags."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        """Return *data* run through the decode filters (no disk I/O)."""
        return self._filter(self._decodefilterpats, filename, data)
705 709
    def transaction(self, desc):
        """Open a new store transaction, or nest into the running one.

        *desc* is recorded in the journal and shown by 'hg rollback'."""
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # hold only a weak reference so a dropped transaction is
        # collectable (and thus aborted) without explicit cleanup
        self._transref = weakref.ref(tr)
        return tr
725 729
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, description and bookmarks so a
        later rollback can restore them.  Returns the tuple of journal
        file names (renamed to undo.* when the transaction closes)."""
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))
747 751
748 752 def recover(self):
749 753 lock = self.lock()
750 754 try:
751 755 if os.path.exists(self.sjoin("journal")):
752 756 self.ui.status(_("rolling back interrupted transaction\n"))
753 757 transaction.rollback(self.sopener, self.sjoin("journal"),
754 758 self.ui.warn)
755 759 self.invalidate()
756 760 return True
757 761 else:
758 762 self.ui.warn(_("no interrupted transaction available\n"))
759 763 return False
760 764 finally:
761 765 lock.release()
762 766
763 767 def rollback(self, dryrun=False, force=False):
764 768 wlock = lock = None
765 769 try:
766 770 wlock = self.wlock()
767 771 lock = self.lock()
768 772 if os.path.exists(self.sjoin("undo")):
769 773 return self._rollback(dryrun, force)
770 774 else:
771 775 self.ui.warn(_("no rollback information available\n"))
772 776 return 1
773 777 finally:
774 778 release(lock, wlock)
775 779
    def _rollback(self, dryrun, force):
        """Restore the repository from the undo.* files.

        Returns 0.  May raise util.Abort when rolling back a commit
        from a different checkout without *force*."""
        ui = self.ui
        try:
            # undo.desc: "<old repo length>\n<transaction desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            # undo.desc missing: still roll back, with a vaguer message
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        self.invalidate()

        # did the rollback strip the working directory's parents?
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            self.destroyed()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        return 0
835 839
836 840 def invalidatecaches(self):
837 841 try:
838 842 delattr(self, '_tagscache')
839 843 except AttributeError:
840 844 pass
841 845
842 846 self._branchcache = None # in UTF-8
843 847 self._branchcachetip = None
844 848
845 849 def invalidatedirstate(self):
846 850 '''Invalidates the dirstate, causing the next call to dirstate
847 851 to check if it was modified since the last time it was read,
848 852 rereading it if it has.
849 853
850 854 This is different to dirstate.invalidate() that it doesn't always
851 855 rereads the dirstate. Use dirstate.invalidate() if you want to
852 856 explicitly read the dirstate again (i.e. restoring it to a previous
853 857 known good state).'''
854 858 try:
855 859 delattr(self, 'dirstate')
856 860 except AttributeError:
857 861 pass
858 862
859 863 def invalidate(self):
860 864 for k in self._filecache:
861 865 # dirstate is invalidated separately in invalidatedirstate()
862 866 if k == 'dirstate':
863 867 continue
864 868
865 869 try:
866 870 delattr(self, k)
867 871 except AttributeError:
868 872 pass
869 873 self.invalidatecaches()
870 874
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname*.

        With *wait*, retry (with the ui-configured timeout) instead of
        raising LockHeld.  *releasefn*/*acquirefn* run on release and
        after acquisition.  Returns the lock object."""
        try:
            # first attempt: non-blocking
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
885 889
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-enter the lock we already hold
            l.lock()
            return l

        def unlock():
            # on release: flush the store and refresh filecache state
            # (dirstate is handled by wlock's unlock instead)
            self.store.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
906 910
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the lock we already hold
            l.lock()
            return l

        def unlock():
            # on release: persist the dirstate and refresh its
            # filecache entry so later reads see the new file state
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
927 931
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx       - filectx providing the file's path, data and copy info
        manifest1  - manifest of the first parent
        manifest2  - manifest of the second parent (empty if not a merge)
        linkrev    - changelog revision the new filelog entry will link to
        tr         - the active transaction
        changelist - list to which the filename is appended when a new
                     filelog revision or a flags-only change is recorded

        Returns the filelog node to store in the new manifest for this
        file.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1007 1011
1008 1012 def commit(self, text="", user=None, date=None, match=None, force=False,
1009 1013 editor=False, extra={}):
1010 1014 """Add a new revision to current repository.
1011 1015
1012 1016 Revision information is gathered from the working directory,
1013 1017 match can be used to filter the committed files. If editor is
1014 1018 supplied, it is called to get a commit message.
1015 1019 """
1016 1020
1017 1021 def fail(f, msg):
1018 1022 raise util.Abort('%s: %s' % (f, msg))
1019 1023
1020 1024 if not match:
1021 1025 match = matchmod.always(self.root, '')
1022 1026
1023 1027 if not force:
1024 1028 vdirs = []
1025 1029 match.dir = vdirs.append
1026 1030 match.bad = fail
1027 1031
1028 1032 wlock = self.wlock()
1029 1033 try:
1030 1034 wctx = self[None]
1031 1035 merge = len(wctx.parents()) > 1
1032 1036
1033 1037 if (not force and merge and match and
1034 1038 (match.files() or match.anypats())):
1035 1039 raise util.Abort(_('cannot partially commit a merge '
1036 1040 '(do not specify files or patterns)'))
1037 1041
1038 1042 changes = self.status(match=match, clean=force)
1039 1043 if force:
1040 1044 changes[0].extend(changes[6]) # mq may commit unchanged files
1041 1045
1042 1046 # check subrepos
1043 1047 subs = []
1044 1048 removedsubs = set()
1045 1049 if '.hgsub' in wctx:
1046 1050 # only manage subrepos and .hgsubstate if .hgsub is present
1047 1051 for p in wctx.parents():
1048 1052 removedsubs.update(s for s in p.substate if match(s))
1049 1053 for s in wctx.substate:
1050 1054 removedsubs.discard(s)
1051 1055 if match(s) and wctx.sub(s).dirty():
1052 1056 subs.append(s)
1053 1057 if (subs or removedsubs):
1054 1058 if (not match('.hgsub') and
1055 1059 '.hgsub' in (wctx.modified() + wctx.added())):
1056 1060 raise util.Abort(
1057 1061 _("can't commit subrepos without .hgsub"))
1058 1062 if '.hgsubstate' not in changes[0]:
1059 1063 changes[0].insert(0, '.hgsubstate')
1060 1064 if '.hgsubstate' in changes[2]:
1061 1065 changes[2].remove('.hgsubstate')
1062 1066 elif '.hgsub' in changes[2]:
1063 1067 # clean up .hgsubstate when .hgsub is removed
1064 1068 if ('.hgsubstate' in wctx and
1065 1069 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1066 1070 changes[2].insert(0, '.hgsubstate')
1067 1071
1068 1072 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1069 1073 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1070 1074 if changedsubs:
1071 1075 raise util.Abort(_("uncommitted changes in subrepo %s")
1072 1076 % changedsubs[0],
1073 1077 hint=_("use --subrepos for recursive commit"))
1074 1078
1075 1079 # make sure all explicit patterns are matched
1076 1080 if not force and match.files():
1077 1081 matched = set(changes[0] + changes[1] + changes[2])
1078 1082
1079 1083 for f in match.files():
1080 1084 if f == '.' or f in matched or f in wctx.substate:
1081 1085 continue
1082 1086 if f in changes[3]: # missing
1083 1087 fail(f, _('file not found!'))
1084 1088 if f in vdirs: # visited directory
1085 1089 d = f + '/'
1086 1090 for mf in matched:
1087 1091 if mf.startswith(d):
1088 1092 break
1089 1093 else:
1090 1094 fail(f, _("no match under directory!"))
1091 1095 elif f not in self.dirstate:
1092 1096 fail(f, _("file not tracked!"))
1093 1097
1094 1098 if (not force and not extra.get("close") and not merge
1095 1099 and not (changes[0] or changes[1] or changes[2])
1096 1100 and wctx.branch() == wctx.p1().branch()):
1097 1101 return None
1098 1102
1099 1103 ms = mergemod.mergestate(self)
1100 1104 for f in changes[0]:
1101 1105 if f in ms and ms[f] == 'u':
1102 1106 raise util.Abort(_("unresolved merge conflicts "
1103 1107 "(see hg help resolve)"))
1104 1108
1105 1109 cctx = context.workingctx(self, text, user, date, extra, changes)
1106 1110 if editor:
1107 1111 cctx._text = editor(self, cctx, subs)
1108 1112 edited = (text != cctx._text)
1109 1113
1110 1114 # commit subs
1111 1115 if subs or removedsubs:
1112 1116 state = wctx.substate.copy()
1113 1117 for s in sorted(subs):
1114 1118 sub = wctx.sub(s)
1115 1119 self.ui.status(_('committing subrepository %s\n') %
1116 1120 subrepo.subrelpath(sub))
1117 1121 sr = sub.commit(cctx._text, user, date)
1118 1122 state[s] = (state[s][0], sr)
1119 1123 subrepo.writestate(self, state)
1120 1124
1121 1125 # Save commit message in case this transaction gets rolled back
1122 1126 # (e.g. by a pretxncommit hook). Leave the content alone on
1123 1127 # the assumption that the user will use the same editor again.
1124 1128 msgfn = self.savecommitmessage(cctx._text)
1125 1129
1126 1130 p1, p2 = self.dirstate.parents()
1127 1131 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1128 1132 try:
1129 1133 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1130 1134 ret = self.commitctx(cctx, True)
1131 1135 except:
1132 1136 if edited:
1133 1137 self.ui.write(
1134 1138 _('note: commit message saved in %s\n') % msgfn)
1135 1139 raise
1136 1140
1137 1141 # update bookmarks, dirstate and mergestate
1138 1142 bookmarks.update(self, p1, ret)
1139 1143 for f in changes[0] + changes[1]:
1140 1144 self.dirstate.normal(f)
1141 1145 for f in changes[2]:
1142 1146 self.dirstate.drop(f)
1143 1147 self.dirstate.setparents(ret)
1144 1148 ms.reset()
1145 1149 finally:
1146 1150 wlock.release()
1147 1151
1148 1152 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1149 1153 return ret
1150 1154
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With error=False, a file that raises IOError with ENOENT while
        being committed is recorded as removed; any other IOError (or any
        IOError at all when error=True) aborts the commit. Returns the
        node of the new changelog entry.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # file vanished (ENOENT): treat as removed
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog; delayupdate keeps the new entry pending so
            # pretxncommit hooks can see it via the 'pending' callback
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1224 1228
1225 1229 def destroyed(self):
1226 1230 '''Inform the repository that nodes have been destroyed.
1227 1231 Intended for use by strip and rollback, so there's a common
1228 1232 place for anything that has to be done after destroying history.'''
1229 1233 # XXX it might be nice if we could take the list of destroyed
1230 1234 # nodes, but I don't see an easy way for rollback() to do that
1231 1235
1232 1236 # Ensure the persistent tag cache is updated. Doing it now
1233 1237 # means that the tag cache only has to worry about destroyed
1234 1238 # heads immediately after a strip/rollback. That in turn
1235 1239 # guarantees that "cachetip == currenttip" (comparing both rev
1236 1240 # and node) always means no nodes have been added or destroyed.
1237 1241
1238 1242 # XXX this is suboptimal when qrefresh'ing: we strip the current
1239 1243 # head, refresh the tag cache, then immediately add a new head.
1240 1244 # But I think doing it this way is necessary for the "instant
1241 1245 # tag cache retrieval" case to work.
1242 1246 self.invalidatecaches()
1243 1247
1244 1248 def walk(self, match, node=None):
1245 1249 '''
1246 1250 walk recursively through the directory tree or a given
1247 1251 changeset, finding all files matched by the match
1248 1252 function
1249 1253 '''
1250 1254 return self[node].walk(match)
1251 1255
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The unknown/ignored/clean lists are only populated when the
        corresponding listing flag is passed.
        """

        def mfmatches(ctx):
            # restrict a context's manifest to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # warn about bad explicit files unless ctx1 knows them
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # recompute modified/added/clean by diffing the two manifests
            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            # merge in subrepo statuses, prefixing each file with its
            # subrepo path
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1398 1402
1399 1403 def heads(self, start=None):
1400 1404 heads = self.changelog.heads(start)
1401 1405 # sort the output in rev descending order
1402 1406 return sorted(heads, key=self.changelog.rev, reverse=True)
1403 1407
1404 1408 def branchheads(self, branch=None, start=None, closed=False):
1405 1409 '''return a (possibly filtered) list of heads for the given branch
1406 1410
1407 1411 Heads are returned in topological order, from newest to oldest.
1408 1412 If branch is None, use the dirstate branch.
1409 1413 If start is not None, return only heads reachable from start.
1410 1414 If closed is True, return heads that are marked as closed as well.
1411 1415 '''
1412 1416 if branch is None:
1413 1417 branch = self[None].branch()
1414 1418 branches = self.branchmap()
1415 1419 if branch not in branches:
1416 1420 return []
1417 1421 # the cache returns heads ordered lowest to highest
1418 1422 bheads = list(reversed(branches[branch]))
1419 1423 if start is not None:
1420 1424 # filter out the heads that cannot be reached from startrev
1421 1425 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1422 1426 bheads = [h for h in bheads if h in fbheads]
1423 1427 if not closed:
1424 1428 bheads = [h for h in bheads if
1425 1429 ('close' not in self.changelog.read(h)[5])]
1426 1430 return bheads
1427 1431
1428 1432 def branches(self, nodes):
1429 1433 if not nodes:
1430 1434 nodes = [self.changelog.tip()]
1431 1435 b = []
1432 1436 for n in nodes:
1433 1437 t = n
1434 1438 while True:
1435 1439 p = self.changelog.parents(n)
1436 1440 if p[1] != nullid or p[0] == nullid:
1437 1441 b.append((t, n, p[0], p[1]))
1438 1442 break
1439 1443 n = p[0]
1440 1444 return b
1441 1445
1442 1446 def between(self, pairs):
1443 1447 r = []
1444 1448
1445 1449 for top, bottom in pairs:
1446 1450 n, l, i = top, [], 0
1447 1451 f = 1
1448 1452
1449 1453 while n != bottom and n != nullid:
1450 1454 p = self.changelog.parents(n)[0]
1451 1455 if i == f:
1452 1456 l.append(n)
1453 1457 f = f * 2
1454 1458 n = p
1455 1459 i += 1
1456 1460
1457 1461 r.append(l)
1458 1462
1459 1463 return r
1460 1464
    def pull(self, remote, heads=None, force=False):
        """Pull changes from *remote* into this repository.

        heads - optional list of remote heads to limit the pull to
        force - passed through to discovery (pull unrelated repos)

        Returns addchangegroup()'s result, or 0 when there was nothing
        to fetch.
        """
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the richest protocol the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
1494 1498
1495 1499 def checkpush(self, force, revs):
1496 1500 """Extensions can override this function if additional checks have
1497 1501 to be performed before pushing, or call it if they override push
1498 1502 command.
1499 1503 """
1500 1504 pass
1501 1505
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
        - 0 means HTTP error *or* nothing to push
        - 1 means we pushed and remote head count is unchanged *or*
          we have outgoing changesets but refused to push
        - other values as described by addchangegroup()

        After the changegroup transfer, bookmarks shared with the remote
        are advanced on the remote side where our local position
        descends from the remote's.
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        # advance remote bookmarks we share: for each bookmark the remote
        # knows, push our local position when it descends from the
        # remote's current position
        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1563 1567
1564 1568 def changegroupinfo(self, nodes, source):
1565 1569 if self.ui.verbose or source == 'bundle':
1566 1570 self.ui.status(_("%d changesets found\n") % len(nodes))
1567 1571 if self.ui.debugflag:
1568 1572 self.ui.debug("list of changesets:\n")
1569 1573 for node in nodes:
1570 1574 self.ui.debug("%s\n" % hex(node))
1571 1575
    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)
1592 1596
1593 1597 def getbundle(self, source, heads=None, common=None):
1594 1598 """Like changegroupsubset, but returns the set difference between the
1595 1599 ancestors of heads and the ancestors common.
1596 1600
1597 1601 If heads is None, use the local heads. If common is None, use [nullid].
1598 1602
1599 1603 The nodes in common might not all be known locally due to the way the
1600 1604 current discovery protocol works.
1601 1605 """
1602 1606 cl = self.changelog
1603 1607 if common:
1604 1608 nm = cl.nodemap
1605 1609 common = [n for n in common if n in nm]
1606 1610 else:
1607 1611 common = [nullid]
1608 1612 if not heads:
1609 1613 heads = cl.heads()
1610 1614 common, missing = cl.findcommonmissing(common, heads)
1611 1615 if not missing:
1612 1616 return None
1613 1617 return self._changegroupsubset(common, missing, heads, source)
1614 1618
    def _changegroupsubset(self, commonrevs, csets, heads, source):
        """Build a changegroup for *csets*, excluding anything whose
        linkrev is in *commonrevs*.

        Returns an unbundler wrapping a lazy generator that emits the
        changelog group, then the manifest group, then one group per
        changed file. The lookup closure doubles as the progress
        reporter and as the collector of needed manifests/filenodes as
        the changelog and manifest groups stream through it.
        """

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            # called once per emitted node; which revlog we are walking
            # determines what bookkeeping to do
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if csets:
                self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1711 1715
1712 1716 def changegroup(self, basenodes, source):
1713 1717 # to avoid a race we use changegroupsubset() (issue1320)
1714 1718 return self.changegroupsubset(basenodes, self.heads(), source)
1715 1719
    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            # nodes of this revlog whose linkrev falls in the outgoing set
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            # per-node callback: collects manifests/files while the
            # changelog group streams, and reports progress
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

            if nodes:
                self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1800 1804
    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # map a changelog node to the revision number it will receive
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            # map a node already stored in the changelog to its revision
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            # progress callback handed to the unbundler: one tick per chunk,
            # reconfigured (step/count/total) for each of the three phases
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            # efiles becomes the *count* of distinct files touched, used as
            # the progress total for the file phase below
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    # tick off the file nodes we were told to expect
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            # anything left in needfiles was promised by a manifest but never
            # delivered by the changegroup: abort rather than store a broken repo
            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            # dh = head-count delta; heads closing a branch ('close' in extra)
            # are not counted as added heads
            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, htext))

            if changesets > 0:
                # hand the pretxnchangegroup hook a way to see the pending
                # (not yet finalized) changelog: returns self.root when
                # writepending() wrote something, else ""
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
1973 1977
    def stream_in(self, remote, requirements):
        """Clone by streaming raw store files from *remote*.

        Wire format as parsed here: one numeric status line (0 = ok,
        1 = operation forbidden, 2 = remote locking failed), then a
        "<total_files> <total_bytes>" line, then for each file a
        "<name>\\0<size>" header followed by *size* bytes of content.

        *requirements* is the set of format requirements to adopt; it is
        merged with our existing non-format requirements and written out.
        Returns len(self.heads()) + 1 (matching addchangegroup's
        "never return 0" convention -- presumably so callers can treat the
        result as a truthy change indicator; confirm against callers).
        """
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            # guard against a zero/negative interval from clock resolution
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            # drop every cached view of the store we just overwrote
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
2032 2036
2033 2037 def clone(self, remote, heads=[], stream=False):
2034 2038 '''clone remote repository.
2035 2039
2036 2040 keyword arguments:
2037 2041 heads: list of revs to clone (forces use of pull)
2038 2042 stream: use streaming clone if possible'''
2039 2043
2040 2044 # now, all clients that can request uncompressed clones can
2041 2045 # read repo formats supported by all servers that can serve
2042 2046 # them.
2043 2047
2044 2048 # if revlog format changes, client will have to check version
2045 2049 # and format flags on "stream" capability, and use
2046 2050 # uncompressed only if compatible.
2047 2051
2048 2052 if stream and not heads:
2049 2053 # 'stream' means remote revlog format is revlogv1 only
2050 2054 if remote.capable('stream'):
2051 2055 return self.stream_in(remote, set(('revlogv1',)))
2052 2056 # otherwise, 'streamreqs' contains the remote revlog format
2053 2057 streamreqs = remote.capable('streamreqs')
2054 2058 if streamreqs:
2055 2059 streamreqs = set(streamreqs.split(','))
2056 2060 # if we support it, stream in and adjust our requirements
2057 2061 if not streamreqs - self.supportedformats:
2058 2062 return self.stream_in(remote, streamreqs)
2059 2063 return self.pull(remote, heads)
2060 2064
2061 2065 def pushkey(self, namespace, key, old, new):
2062 2066 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2063 2067 old=old, new=new)
2064 2068 ret = pushkey.push(self, namespace, key, old, new)
2065 2069 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2066 2070 ret=ret)
2067 2071 return ret
2068 2072
2069 2073 def listkeys(self, namespace):
2070 2074 self.hook('prelistkeys', throw=True, namespace=namespace)
2071 2075 values = pushkey.list(self, namespace)
2072 2076 self.hook('listkeys', namespace=namespace, values=values)
2073 2077 return values
2074 2078
2075 2079 def debugwireargs(self, one, two, three=None, four=None, five=None):
2076 2080 '''used to test argument passing over the wire'''
2077 2081 return "%s %s %s %s %s" % (one, two, three, four, five)
2078 2082
2079 2083 def savecommitmessage(self, text):
2080 2084 fp = self.opener('last-message.txt', 'wb')
2081 2085 try:
2082 2086 fp.write(text)
2083 2087 finally:
2084 2088 fp.close()
2085 2089 return self.pathto(fp.name[len(self.root)+1:])
2086 2090
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback performing the (src, dest) renames in *files*."""
    pending = [tuple(pair) for pair in files]
    def a():
        for src, dest in pending:
            util.rename(src, dest)
    return a
2094 2098
def undoname(fn):
    """Map a journal file path to the matching undo file path."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    # swap the 'journal' prefix for 'undo', keeping the rest of the name
    return os.path.join(directory, 'undo' + name[len('journal'):])
2099 2103
def instance(ui, path, create):
    # Factory for this repository type: convert *path* (presumably a
    # file-style URL -- see util.urllocalpath) to a local filesystem path
    # and construct a localrepository; *create* is forwarded as-is.
    return localrepository(ui, util.urllocalpath(path), create)
2102 2106
def islocal(path):
    # a localrepository is, by definition, always local
    return True
@@ -1,11 +1,39 b''
1 1 # Mercurial phases support code
2 2 #
3 3 # Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 # Augie Fackler <durin42@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
from node import nullid, bin, hex
import errno

allphases = range(2)           # all known phase numbers: currently 0 and 1
trackedphases = allphases[1:]  # every phase but 0 -- unused in this file;
                               # presumably the phases whose roots callers
                               # must track explicitly (confirm with callers)
14
def readroots(repo):
    """Read phase roots from disk.

    Return a list of sets, one per phase in ``allphases``: entry i holds
    the root nodes recorded for phase i.  Phase 0 always contains nullid.
    A missing phaseroots file is not an error: the defaults are returned.
    """
    roots = [set() for i in allphases]
    roots[0].add(nullid)
    try:
        f = repo.sopener('phaseroots')
        try:
            # one "<phase> <hexnode>" entry per line
            for line in f:
                phase, nh = line.strip().split()
                roots[int(phase)].add(bin(nh))
        finally:
            f.close()
    except IOError as inst:
        # only a missing file means "the default values are enough";
        # permission or read errors must not be silently swallowed
        if inst.errno != errno.ENOENT:
            raise
    return roots
30
def writeroots(repo):
    """Write phase roots to disk.

    Emits one "<phase> <hexnode>" line for every root of every phase in
    repo._phaseroots, mirroring the format parsed by readroots().
    """
    # atomictemp so readers never observe a partially written file
    # (NOTE(review): assumes the opener commits the temp file on close())
    f = repo.sopener('phaseroots', 'w', atomictemp=True)
    try:
        for phase, roots in enumerate(repo._phaseroots):
            for h in roots:
                f.write('%i %s\n' % (phase, hex(h)))
    finally:
        f.close()
General Comments 0
You need to be logged in to leave comments. Login now