phases: add a moveboundary function to move phases boundaries...
Pierre-Yves David
r15454:5a7dde5a default
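This commit teaches localrepository to track when in-memory phase data has
diverged from what is on disk: reading the phase roots clears a new
_dirtyphases flag, and the store lock's unlock callback writes the roots back
via phases.writeroots(self) only when the flag is set. A minimal sketch of how
a caller might drive this (the moveboundary(repo, phase, nodes) signature is
taken from the commit summary; the phase value 0 follows the
phases.trackedphases convention used in the code below):

    from mercurial import hg, phases, ui

    repo = hg.repository(ui.ui(), '.')
    l = repo.lock()      # phase roots are only written under the store lock
    try:
        node = repo.lookup('tip')
        # move the boundary so that 'tip' ends up in phase 0;
        # this is expected to set repo._dirtyphases as a side effect
        phases.moveboundary(repo, 0, [node])
    finally:
        l.release()      # the unlock callback runs phases.writeroots(repo)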
@@ -1,2120 +1,2124
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 self._dirtyphases = False
39 40
40 41 try:
41 42 self.ui.readconfig(self.join("hgrc"), self.root)
42 43 extensions.loadall(self.ui)
43 44 except IOError:
44 45 pass
45 46
46 47 if not os.path.isdir(self.path):
47 48 if create:
48 49 if not os.path.exists(path):
49 50 util.makedirs(path)
50 51 util.makedir(self.path, notindexed=True)
51 52 requirements = ["revlogv1"]
52 53 if self.ui.configbool('format', 'usestore', True):
53 54 os.mkdir(os.path.join(self.path, "store"))
54 55 requirements.append("store")
55 56 if self.ui.configbool('format', 'usefncache', True):
56 57 requirements.append("fncache")
57 58 if self.ui.configbool('format', 'dotencode', True):
58 59 requirements.append('dotencode')
59 60 # create an invalid changelog
60 61 self.opener.append(
61 62 "00changelog.i",
62 63 '\0\0\0\2' # represents revlogv2
63 64 ' dummy changelog to prevent using the old repo layout'
64 65 )
65 66 if self.ui.configbool('format', 'generaldelta', False):
66 67 requirements.append("generaldelta")
67 68 requirements = set(requirements)
68 69 else:
69 70 raise error.RepoError(_("repository %s not found") % path)
70 71 elif create:
71 72 raise error.RepoError(_("repository %s already exists") % path)
72 73 else:
73 74 try:
74 75 requirements = scmutil.readrequires(self.opener, self.supported)
75 76 except IOError, inst:
76 77 if inst.errno != errno.ENOENT:
77 78 raise
78 79 requirements = set()
79 80
80 81 self.sharedpath = self.path
81 82 try:
82 83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 84 if not os.path.exists(s):
84 85 raise error.RepoError(
85 86 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 87 self.sharedpath = s
87 88 except IOError, inst:
88 89 if inst.errno != errno.ENOENT:
89 90 raise
90 91
91 92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 93 self.spath = self.store.path
93 94 self.sopener = self.store.opener
94 95 self.sjoin = self.store.join
95 96 self.opener.createmode = self.store.createmode
96 97 self._applyrequirements(requirements)
97 98 if create:
98 99 self._writerequirements()
99 100
100 101
101 102 self._branchcache = None
102 103 self._branchcachetip = None
103 104 self.filterpats = {}
104 105 self._datafilters = {}
105 106 self._transref = self._lockref = self._wlockref = None
106 107
107 108 # A cache for various files under .hg/ that tracks file changes,
108 109 # (used by the filecache decorator)
109 110 #
110 111 # Maps a property name to its util.filecacheentry
111 112 self._filecache = {}
112 113
113 114 def _applyrequirements(self, requirements):
114 115 self.requirements = requirements
115 116 openerreqs = set(('revlogv1', 'generaldelta'))
116 117 self.sopener.options = dict((r, 1) for r in requirements
117 118 if r in openerreqs)
118 119
119 120 def _writerequirements(self):
120 121 reqfile = self.opener("requires", "w")
121 122 for r in self.requirements:
122 123 reqfile.write("%s\n" % r)
123 124 reqfile.close()
124 125
125 126 def _checknested(self, path):
126 127 """Determine if path is a legal nested repository."""
127 128 if not path.startswith(self.root):
128 129 return False
129 130 subpath = path[len(self.root) + 1:]
130 131
131 132 # XXX: Checking against the current working copy is wrong in
132 133 # the sense that it can reject things like
133 134 #
134 135 # $ hg cat -r 10 sub/x.txt
135 136 #
136 137 # if sub/ is no longer a subrepository in the working copy
137 138 # parent revision.
138 139 #
139 140 # However, it can of course also allow things that would have
140 141 # been rejected before, such as the above cat command if sub/
141 142 # is a subrepository now, but was a normal directory before.
142 143 # The old path auditor would have rejected by mistake since it
143 144 # panics when it sees sub/.hg/.
144 145 #
145 146 # All in all, checking against the working copy seems sensible
146 147 # since we want to prevent access to nested repositories on
147 148 # the filesystem *now*.
148 149 ctx = self[None]
149 150 parts = util.splitpath(subpath)
150 151 while parts:
151 152 prefix = os.sep.join(parts)
152 153 if prefix in ctx.substate:
153 154 if prefix == subpath:
154 155 return True
155 156 else:
156 157 sub = ctx.sub(prefix)
157 158 return sub.checknested(subpath[len(prefix) + 1:])
158 159 else:
159 160 parts.pop()
160 161 return False
161 162
162 163 @filecache('bookmarks')
163 164 def _bookmarks(self):
164 165 return bookmarks.read(self)
165 166
166 167 @filecache('bookmarks.current')
167 168 def _bookmarkcurrent(self):
168 169 return bookmarks.readcurrent(self)
169 170
170 171 def _writebookmarks(self, marks):
171 172 bookmarks.write(self)
172 173
173 174 @filecache('phaseroots')
174 175 def _phaseroots(self):
176 self._dirtyphases = False
175 177 return phases.readroots(self)
176 178
177 179 @propertycache
178 180 def _phaserev(self):
179 181 cache = [0] * len(self)
180 182 for phase in phases.trackedphases:
181 183 roots = map(self.changelog.rev, self._phaseroots[phase])
182 184 if roots:
183 185 for rev in roots:
184 186 cache[rev] = phase
185 187 for rev in self.changelog.descendants(*roots):
186 188 cache[rev] = phase
187 189 return cache
188 190
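# A note on the cache built above: once populated, _phaserev gives O(1)
# phase lookups by revision number, e.g. (sketch; assumes a loaded
# localrepository instance `repo`):
#
#     rev = repo['tip'].rev()
#     phase = repo._phaserev[rev]   # 0 unless rev is a root or descendant
#                                   # of a root in phases.trackedphases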
189 191 @filecache('00changelog.i', True)
190 192 def changelog(self):
191 193 c = changelog.changelog(self.sopener)
192 194 if 'HG_PENDING' in os.environ:
193 195 p = os.environ['HG_PENDING']
194 196 if p.startswith(self.root):
195 197 c.readpending('00changelog.i.a')
196 198 return c
197 199
198 200 @filecache('00manifest.i', True)
199 201 def manifest(self):
200 202 return manifest.manifest(self.sopener)
201 203
202 204 @filecache('dirstate')
203 205 def dirstate(self):
204 206 warned = [0]
205 207 def validate(node):
206 208 try:
207 209 self.changelog.rev(node)
208 210 return node
209 211 except error.LookupError:
210 212 if not warned[0]:
211 213 warned[0] = True
212 214 self.ui.warn(_("warning: ignoring unknown"
213 215 " working parent %s!\n") % short(node))
214 216 return nullid
215 217
216 218 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
217 219
218 220 def __getitem__(self, changeid):
219 221 if changeid is None:
220 222 return context.workingctx(self)
221 223 return context.changectx(self, changeid)
222 224
223 225 def __contains__(self, changeid):
224 226 try:
225 227 return bool(self.lookup(changeid))
226 228 except error.RepoLookupError:
227 229 return False
228 230
229 231 def __nonzero__(self):
230 232 return True
231 233
232 234 def __len__(self):
233 235 return len(self.changelog)
234 236
235 237 def __iter__(self):
236 238 for i in xrange(len(self)):
237 239 yield i
238 240
239 241 def revs(self, expr, *args):
240 242 '''Return a list of revisions matching the given revset'''
241 243 expr = revset.formatspec(expr, *args)
242 244 m = revset.match(None, expr)
243 245 return [r for r in m(self, range(len(self)))]
244 246
245 247 def set(self, expr, *args):
246 248 '''
247 249 Yield a context for each matching revision, after doing arg
248 250 replacement via revset.formatspec
249 251 '''
250 252 for r in self.revs(expr, *args):
251 253 yield self[r]
252 254
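# Example use of revs()/set() above, with formatspec-style argument
# substitution (sketch; %s inserts a quoted string, %d an integer):
#
#     revnums = repo.revs('branch(%s) and not merge()', 'default')
#     for ctx in repo.set('%d::%d', 0, 5):
#         print ctx.hex(), ctx.description()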
253 255 def url(self):
254 256 return 'file:' + self.root
255 257
256 258 def hook(self, name, throw=False, **args):
257 259 return hook.hook(self.ui, self, name, throw, **args)
258 260
259 261 tag_disallowed = ':\r\n'
260 262
261 263 def _tag(self, names, node, message, local, user, date, extra={}):
262 264 if isinstance(names, str):
263 265 allchars = names
264 266 names = (names,)
265 267 else:
266 268 allchars = ''.join(names)
267 269 for c in self.tag_disallowed:
268 270 if c in allchars:
269 271 raise util.Abort(_('%r cannot be used in a tag name') % c)
270 272
271 273 branches = self.branchmap()
272 274 for name in names:
273 275 self.hook('pretag', throw=True, node=hex(node), tag=name,
274 276 local=local)
275 277 if name in branches:
276 278 self.ui.warn(_("warning: tag %s conflicts with existing"
277 279 " branch name\n") % name)
278 280
279 281 def writetags(fp, names, munge, prevtags):
280 282 fp.seek(0, 2)
281 283 if prevtags and prevtags[-1] != '\n':
282 284 fp.write('\n')
283 285 for name in names:
284 286 m = munge and munge(name) or name
285 287 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
286 288 old = self.tags().get(name, nullid)
287 289 fp.write('%s %s\n' % (hex(old), m))
288 290 fp.write('%s %s\n' % (hex(node), m))
289 291 fp.close()
290 292
291 293 prevtags = ''
292 294 if local:
293 295 try:
294 296 fp = self.opener('localtags', 'r+')
295 297 except IOError:
296 298 fp = self.opener('localtags', 'a')
297 299 else:
298 300 prevtags = fp.read()
299 301
300 302 # local tags are stored in the current charset
301 303 writetags(fp, names, None, prevtags)
302 304 for name in names:
303 305 self.hook('tag', node=hex(node), tag=name, local=local)
304 306 return
305 307
306 308 try:
307 309 fp = self.wfile('.hgtags', 'rb+')
308 310 except IOError, e:
309 311 if e.errno != errno.ENOENT:
310 312 raise
311 313 fp = self.wfile('.hgtags', 'ab')
312 314 else:
313 315 prevtags = fp.read()
314 316
315 317 # committed tags are stored in UTF-8
316 318 writetags(fp, names, encoding.fromlocal, prevtags)
317 319
318 320 fp.close()
319 321
320 322 if '.hgtags' not in self.dirstate:
321 323 self[None].add(['.hgtags'])
322 324
323 325 m = matchmod.exact(self.root, '', ['.hgtags'])
324 326 tagnode = self.commit(message, user, date, extra=extra, match=m)
325 327
326 328 for name in names:
327 329 self.hook('tag', node=hex(node), tag=name, local=local)
328 330
329 331 return tagnode
330 332
331 333 def tag(self, names, node, message, local, user, date):
332 334 '''tag a revision with one or more symbolic names.
333 335
334 336 names is a list of strings or, when adding a single tag, names may be a
335 337 string.
336 338
337 339 if local is True, the tags are stored in a per-repository file.
338 340 otherwise, they are stored in the .hgtags file, and a new
339 341 changeset is committed with the change.
340 342
341 343 keyword arguments:
342 344
343 345 local: whether to store tags in non-version-controlled file
344 346 (default False)
345 347
346 348 message: commit message to use if committing
347 349
348 350 user: name of user to use if committing
349 351
350 352 date: date tuple to use if committing'''
351 353
352 354 if not local:
353 355 for x in self.status()[:5]:
354 356 if '.hgtags' in x:
355 357 raise util.Abort(_('working copy of .hgtags is changed '
356 358 '(please commit .hgtags manually)'))
357 359
358 360 self.tags() # instantiate the cache
359 361 self._tag(names, node, message, local, user, date)
360 362
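# Typical call matching the docstring above (sketch; the tag name and
# message are hypothetical):
#
#     repo.tag(['v1.0'], repo.lookup('tip'), 'Added tag v1.0',
#              local=False, user=None, date=None)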
361 363 @propertycache
362 364 def _tagscache(self):
363 365 '''Returns a tagscache object that contains various tag-related caches.'''
364 366
365 367 # This simplifies its cache management by having one decorated
366 368 # function (this one) and the rest simply fetch things from it.
367 369 class tagscache(object):
368 370 def __init__(self):
369 371 # These two define the set of tags for this repository. tags
370 372 # maps tag name to node; tagtypes maps tag name to 'global' or
371 373 # 'local'. (Global tags are defined by .hgtags across all
372 374 # heads, and local tags are defined in .hg/localtags.)
373 375 # They constitute the in-memory cache of tags.
374 376 self.tags = self.tagtypes = None
375 377
376 378 self.nodetagscache = self.tagslist = None
377 379
378 380 cache = tagscache()
379 381 cache.tags, cache.tagtypes = self._findtags()
380 382
381 383 return cache
382 384
383 385 def tags(self):
384 386 '''return a mapping of tag to node'''
385 387 return self._tagscache.tags
386 388
387 389 def _findtags(self):
388 390 '''Do the hard work of finding tags. Return a pair of dicts
389 391 (tags, tagtypes) where tags maps tag name to node, and tagtypes
390 392 maps tag name to a string like \'global\' or \'local\'.
391 393 Subclasses or extensions are free to add their own tags, but
392 394 should be aware that the returned dicts will be retained for the
393 395 duration of the localrepo object.'''
394 396
395 397 # XXX what tagtype should subclasses/extensions use? Currently
396 398 # mq and bookmarks add tags, but do not set the tagtype at all.
397 399 # Should each extension invent its own tag type? Should there
398 400 # be one tagtype for all such "virtual" tags? Or is the status
399 401 # quo fine?
400 402
401 403 alltags = {} # map tag name to (node, hist)
402 404 tagtypes = {}
403 405
404 406 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
405 407 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
406 408
407 409 # Build the return dicts. Have to re-encode tag names because
408 410 # the tags module always uses UTF-8 (in order not to lose info
409 411 # writing to the cache), but the rest of Mercurial wants them in
410 412 # local encoding.
411 413 tags = {}
412 414 for (name, (node, hist)) in alltags.iteritems():
413 415 if node != nullid:
414 416 try:
415 417 # ignore tags to unknown nodes
416 418 self.changelog.lookup(node)
417 419 tags[encoding.tolocal(name)] = node
418 420 except error.LookupError:
419 421 pass
420 422 tags['tip'] = self.changelog.tip()
421 423 tagtypes = dict([(encoding.tolocal(name), value)
422 424 for (name, value) in tagtypes.iteritems()])
423 425 return (tags, tagtypes)
424 426
425 427 def tagtype(self, tagname):
426 428 '''
427 429 return the type of the given tag. result can be:
428 430
429 431 'local' : a local tag
430 432 'global' : a global tag
431 433 None : tag does not exist
432 434 '''
433 435
434 436 return self._tagscache.tagtypes.get(tagname)
435 437
436 438 def tagslist(self):
437 439 '''return a list of tags ordered by revision'''
438 440 if not self._tagscache.tagslist:
439 441 l = []
440 442 for t, n in self.tags().iteritems():
441 443 r = self.changelog.rev(n)
442 444 l.append((r, t, n))
443 445 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
444 446
445 447 return self._tagscache.tagslist
446 448
447 449 def nodetags(self, node):
448 450 '''return the tags associated with a node'''
449 451 if not self._tagscache.nodetagscache:
450 452 nodetagscache = {}
451 453 for t, n in self.tags().iteritems():
452 454 nodetagscache.setdefault(n, []).append(t)
453 455 for tags in nodetagscache.itervalues():
454 456 tags.sort()
455 457 self._tagscache.nodetagscache = nodetagscache
456 458 return self._tagscache.nodetagscache.get(node, [])
457 459
458 460 def nodebookmarks(self, node):
459 461 marks = []
460 462 for bookmark, n in self._bookmarks.iteritems():
461 463 if n == node:
462 464 marks.append(bookmark)
463 465 return sorted(marks)
464 466
465 467 def _branchtags(self, partial, lrev):
466 468 # TODO: rename this function?
467 469 tiprev = len(self) - 1
468 470 if lrev != tiprev:
469 471 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
470 472 self._updatebranchcache(partial, ctxgen)
471 473 self._writebranchcache(partial, self.changelog.tip(), tiprev)
472 474
473 475 return partial
474 476
475 477 def updatebranchcache(self):
476 478 tip = self.changelog.tip()
477 479 if self._branchcache is not None and self._branchcachetip == tip:
478 480 return self._branchcache
479 481
480 482 oldtip = self._branchcachetip
481 483 self._branchcachetip = tip
482 484 if oldtip is None or oldtip not in self.changelog.nodemap:
483 485 partial, last, lrev = self._readbranchcache()
484 486 else:
485 487 lrev = self.changelog.rev(oldtip)
486 488 partial = self._branchcache
487 489
488 490 self._branchtags(partial, lrev)
489 491 # this private cache holds all heads (not just tips)
490 492 self._branchcache = partial
491 493
492 494 def branchmap(self):
493 495 '''returns a dictionary {branch: [branchheads]}'''
494 496 self.updatebranchcache()
495 497 return self._branchcache
496 498
497 499 def branchtags(self):
498 500 '''return a dict where branch names map to the tipmost head of
499 501 the branch, open heads come before closed'''
500 502 bt = {}
501 503 for bn, heads in self.branchmap().iteritems():
502 504 tip = heads[-1]
503 505 for h in reversed(heads):
504 506 if 'close' not in self.changelog.read(h)[5]:
505 507 tip = h
506 508 break
507 509 bt[bn] = tip
508 510 return bt
509 511
510 512 def _readbranchcache(self):
511 513 partial = {}
512 514 try:
513 515 f = self.opener("cache/branchheads")
514 516 lines = f.read().split('\n')
515 517 f.close()
516 518 except (IOError, OSError):
517 519 return {}, nullid, nullrev
518 520
519 521 try:
520 522 last, lrev = lines.pop(0).split(" ", 1)
521 523 last, lrev = bin(last), int(lrev)
522 524 if lrev >= len(self) or self[lrev].node() != last:
523 525 # invalidate the cache
524 526 raise ValueError('invalidating branch cache (tip differs)')
525 527 for l in lines:
526 528 if not l:
527 529 continue
528 530 node, label = l.split(" ", 1)
529 531 label = encoding.tolocal(label.strip())
530 532 partial.setdefault(label, []).append(bin(node))
531 533 except KeyboardInterrupt:
532 534 raise
533 535 except Exception, inst:
534 536 if self.ui.debugflag:
535 537 self.ui.warn(str(inst), '\n')
536 538 partial, last, lrev = {}, nullid, nullrev
537 539 return partial, last, lrev
538 540
539 541 def _writebranchcache(self, branches, tip, tiprev):
540 542 try:
541 543 f = self.opener("cache/branchheads", "w", atomictemp=True)
542 544 f.write("%s %s\n" % (hex(tip), tiprev))
543 545 for label, nodes in branches.iteritems():
544 546 for node in nodes:
545 547 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
546 548 f.close()
547 549 except (IOError, OSError):
548 550 pass
549 551
550 552 def _updatebranchcache(self, partial, ctxgen):
551 553 # collect new branch entries
552 554 newbranches = {}
553 555 for c in ctxgen:
554 556 newbranches.setdefault(c.branch(), []).append(c.node())
555 557 # if older branchheads are reachable from new ones, they aren't
556 558 # really branchheads. Note checking parents is insufficient:
557 559 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
558 560 for branch, newnodes in newbranches.iteritems():
559 561 bheads = partial.setdefault(branch, [])
560 562 bheads.extend(newnodes)
561 563 if len(bheads) <= 1:
562 564 continue
563 565 bheads = sorted(bheads, key=lambda x: self[x].rev())
564 566 # starting from tip means fewer passes over reachable
565 567 while newnodes:
566 568 latest = newnodes.pop()
567 569 if latest not in bheads:
568 570 continue
569 571 minbhrev = self[bheads[0]].node()
570 572 reachable = self.changelog.reachable(latest, minbhrev)
571 573 reachable.remove(latest)
572 574 if reachable:
573 575 bheads = [b for b in bheads if b not in reachable]
574 576 partial[branch] = bheads
575 577
576 578 def lookup(self, key):
577 579 if isinstance(key, int):
578 580 return self.changelog.node(key)
579 581 elif key == '.':
580 582 return self.dirstate.p1()
581 583 elif key == 'null':
582 584 return nullid
583 585 elif key == 'tip':
584 586 return self.changelog.tip()
585 587 n = self.changelog._match(key)
586 588 if n:
587 589 return n
588 590 if key in self._bookmarks:
589 591 return self._bookmarks[key]
590 592 if key in self.tags():
591 593 return self.tags()[key]
592 594 if key in self.branchtags():
593 595 return self.branchtags()[key]
594 596 n = self.changelog._partialmatch(key)
595 597 if n:
596 598 return n
597 599
598 600 # can't find key, check if it might have come from damaged dirstate
599 601 if key in self.dirstate.parents():
600 602 raise error.Abort(_("working directory has unknown parent '%s'!")
601 603 % short(key))
602 604 try:
603 605 if len(key) == 20:
604 606 key = hex(key)
605 607 except TypeError:
606 608 pass
607 609 raise error.RepoLookupError(_("unknown revision '%s'") % key)
608 610
609 611 def lookupbranch(self, key, remote=None):
610 612 repo = remote or self
611 613 if key in repo.branchmap():
612 614 return key
613 615
614 616 repo = (remote and remote.local()) and remote or self
615 617 return repo[key].branch()
616 618
617 619 def known(self, nodes):
618 620 nm = self.changelog.nodemap
619 621 return [(n in nm) for n in nodes]
620 622
621 623 def local(self):
622 624 return self
623 625
624 626 def join(self, f):
625 627 return os.path.join(self.path, f)
626 628
627 629 def wjoin(self, f):
628 630 return os.path.join(self.root, f)
629 631
630 632 def file(self, f):
631 633 if f[0] == '/':
632 634 f = f[1:]
633 635 return filelog.filelog(self.sopener, f)
634 636
635 637 def changectx(self, changeid):
636 638 return self[changeid]
637 639
638 640 def parents(self, changeid=None):
639 641 '''get list of changectxs for parents of changeid'''
640 642 return self[changeid].parents()
641 643
642 644 def filectx(self, path, changeid=None, fileid=None):
643 645 """changeid can be a changeset revision, node, or tag.
644 646 fileid can be a file revision or node."""
645 647 return context.filectx(self, path, changeid, fileid)
646 648
647 649 def getcwd(self):
648 650 return self.dirstate.getcwd()
649 651
650 652 def pathto(self, f, cwd=None):
651 653 return self.dirstate.pathto(f, cwd)
652 654
653 655 def wfile(self, f, mode='r'):
654 656 return self.wopener(f, mode)
655 657
656 658 def _link(self, f):
657 659 return os.path.islink(self.wjoin(f))
658 660
659 661 def _loadfilter(self, filter):
660 662 if filter not in self.filterpats:
661 663 l = []
662 664 for pat, cmd in self.ui.configitems(filter):
663 665 if cmd == '!':
664 666 continue
665 667 mf = matchmod.match(self.root, '', [pat])
666 668 fn = None
667 669 params = cmd
668 670 for name, filterfn in self._datafilters.iteritems():
669 671 if cmd.startswith(name):
670 672 fn = filterfn
671 673 params = cmd[len(name):].lstrip()
672 674 break
673 675 if not fn:
674 676 fn = lambda s, c, **kwargs: util.filter(s, c)
675 677 # Wrap old filters not supporting keyword arguments
676 678 if not inspect.getargspec(fn)[2]:
677 679 oldfn = fn
678 680 fn = lambda s, c, **kwargs: oldfn(s, c)
679 681 l.append((mf, fn, params))
680 682 self.filterpats[filter] = l
681 683 return self.filterpats[filter]
682 684
683 685 def _filter(self, filterpats, filename, data):
684 686 for mf, fn, cmd in filterpats:
685 687 if mf(filename):
686 688 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
687 689 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
688 690 break
689 691
690 692 return data
691 693
692 694 @propertycache
693 695 def _encodefilterpats(self):
694 696 return self._loadfilter('encode')
695 697
696 698 @propertycache
697 699 def _decodefilterpats(self):
698 700 return self._loadfilter('decode')
699 701
700 702 def adddatafilter(self, name, filter):
701 703 self._datafilters[name] = filter
702 704
703 705 def wread(self, filename):
704 706 if self._link(filename):
705 707 data = os.readlink(self.wjoin(filename))
706 708 else:
707 709 data = self.wopener.read(filename)
708 710 return self._filter(self._encodefilterpats, filename, data)
709 711
710 712 def wwrite(self, filename, data, flags):
711 713 data = self._filter(self._decodefilterpats, filename, data)
712 714 if 'l' in flags:
713 715 self.wopener.symlink(data, filename)
714 716 else:
715 717 self.wopener.write(filename, data)
716 718 if 'x' in flags:
717 719 util.setflags(self.wjoin(filename), False, True)
718 720
719 721 def wwritedata(self, filename, data):
720 722 return self._filter(self._decodefilterpats, filename, data)
721 723
722 724 def transaction(self, desc):
723 725 tr = self._transref and self._transref() or None
724 726 if tr and tr.running():
725 727 return tr.nest()
726 728
727 729 # abort here if the journal already exists
728 730 if os.path.exists(self.sjoin("journal")):
729 731 raise error.RepoError(
730 732 _("abandoned transaction found - run hg recover"))
731 733
732 734 journalfiles = self._writejournal(desc)
733 735 renames = [(x, undoname(x)) for x in journalfiles]
734 736
735 737 tr = transaction.transaction(self.ui.warn, self.sopener,
736 738 self.sjoin("journal"),
737 739 aftertrans(renames),
738 740 self.store.createmode)
739 741 self._transref = weakref.ref(tr)
740 742 return tr
741 743
742 744 def _writejournal(self, desc):
743 745 # save dirstate for rollback
744 746 try:
745 747 ds = self.opener.read("dirstate")
746 748 except IOError:
747 749 ds = ""
748 750 self.opener.write("journal.dirstate", ds)
749 751 self.opener.write("journal.branch",
750 752 encoding.fromlocal(self.dirstate.branch()))
751 753 self.opener.write("journal.desc",
752 754 "%d\n%s\n" % (len(self), desc))
753 755
754 756 bkname = self.join('bookmarks')
755 757 if os.path.exists(bkname):
756 758 util.copyfile(bkname, self.join('journal.bookmarks'))
757 759 else:
758 760 self.opener.write('journal.bookmarks', '')
759 761
760 762 return (self.sjoin('journal'), self.join('journal.dirstate'),
761 763 self.join('journal.branch'), self.join('journal.desc'),
762 764 self.join('journal.bookmarks'))
763 765
764 766 def recover(self):
765 767 lock = self.lock()
766 768 try:
767 769 if os.path.exists(self.sjoin("journal")):
768 770 self.ui.status(_("rolling back interrupted transaction\n"))
769 771 transaction.rollback(self.sopener, self.sjoin("journal"),
770 772 self.ui.warn)
771 773 self.invalidate()
772 774 return True
773 775 else:
774 776 self.ui.warn(_("no interrupted transaction available\n"))
775 777 return False
776 778 finally:
777 779 lock.release()
778 780
779 781 def rollback(self, dryrun=False, force=False):
780 782 wlock = lock = None
781 783 try:
782 784 wlock = self.wlock()
783 785 lock = self.lock()
784 786 if os.path.exists(self.sjoin("undo")):
785 787 return self._rollback(dryrun, force)
786 788 else:
787 789 self.ui.warn(_("no rollback information available\n"))
788 790 return 1
789 791 finally:
790 792 release(lock, wlock)
791 793
792 794 def _rollback(self, dryrun, force):
793 795 ui = self.ui
794 796 try:
795 797 args = self.opener.read('undo.desc').splitlines()
796 798 (oldlen, desc, detail) = (int(args[0]), args[1], None)
797 799 if len(args) >= 3:
798 800 detail = args[2]
799 801 oldtip = oldlen - 1
800 802
801 803 if detail and ui.verbose:
802 804 msg = (_('repository tip rolled back to revision %s'
803 805 ' (undo %s: %s)\n')
804 806 % (oldtip, desc, detail))
805 807 else:
806 808 msg = (_('repository tip rolled back to revision %s'
807 809 ' (undo %s)\n')
808 810 % (oldtip, desc))
809 811 except IOError:
810 812 msg = _('rolling back unknown transaction\n')
811 813 desc = None
812 814
813 815 if not force and self['.'] != self['tip'] and desc == 'commit':
814 816 raise util.Abort(
815 817 _('rollback of last commit while not checked out '
816 818 'may lose data'), hint=_('use -f to force'))
817 819
818 820 ui.status(msg)
819 821 if dryrun:
820 822 return 0
821 823
822 824 parents = self.dirstate.parents()
823 825 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
824 826 if os.path.exists(self.join('undo.bookmarks')):
825 827 util.rename(self.join('undo.bookmarks'),
826 828 self.join('bookmarks'))
827 829 self.invalidate()
828 830
829 831 parentgone = (parents[0] not in self.changelog.nodemap or
830 832 parents[1] not in self.changelog.nodemap)
831 833 if parentgone:
832 834 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
833 835 try:
834 836 branch = self.opener.read('undo.branch')
835 837 self.dirstate.setbranch(branch)
836 838 except IOError:
837 839 ui.warn(_('named branch could not be reset: '
838 840 'current branch is still \'%s\'\n')
839 841 % self.dirstate.branch())
840 842
841 843 self.dirstate.invalidate()
842 844 self.destroyed()
843 845 parents = tuple([p.rev() for p in self.parents()])
844 846 if len(parents) > 1:
845 847 ui.status(_('working directory now based on '
846 848 'revisions %d and %d\n') % parents)
847 849 else:
848 850 ui.status(_('working directory now based on '
849 851 'revision %d\n') % parents)
850 852 return 0
851 853
852 854 def invalidatecaches(self):
853 855 try:
854 856 delattr(self, '_tagscache')
855 857 except AttributeError:
856 858 pass
857 859
858 860 self._branchcache = None # in UTF-8
859 861 self._branchcachetip = None
860 862
861 863 def invalidatedirstate(self):
862 864 '''Invalidates the dirstate, causing the next call to dirstate
863 865 to check if it was modified since the last time it was read,
864 866 rereading it if it has.
865 867
866 868 This is different from dirstate.invalidate() in that it doesn't
867 869 always reread the dirstate. Use dirstate.invalidate() if you want
868 870 to explicitly reread the dirstate (i.e. restore it to a previous
869 871 known good state).'''
870 872 try:
871 873 delattr(self, 'dirstate')
872 874 except AttributeError:
873 875 pass
874 876
875 877 def invalidate(self):
876 878 for k in self._filecache:
877 879 # dirstate is invalidated separately in invalidatedirstate()
878 880 if k == 'dirstate':
879 881 continue
880 882
881 883 try:
882 884 delattr(self, k)
883 885 except AttributeError:
884 886 pass
885 887 self.invalidatecaches()
886 888
887 889 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
888 890 try:
889 891 l = lock.lock(lockname, 0, releasefn, desc=desc)
890 892 except error.LockHeld, inst:
891 893 if not wait:
892 894 raise
893 895 self.ui.warn(_("waiting for lock on %s held by %r\n") %
894 896 (desc, inst.locker))
895 897 # default to 600 seconds timeout
896 898 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
897 899 releasefn, desc=desc)
898 900 if acquirefn:
899 901 acquirefn()
900 902 return l
901 903
902 904 def lock(self, wait=True):
903 905 '''Lock the repository store (.hg/store) and return a weak reference
904 906 to the lock. Use this before modifying the store (e.g. committing or
905 907 stripping). If you are opening a transaction, get a lock as well.'''
906 908 l = self._lockref and self._lockref()
907 909 if l is not None and l.held:
908 910 l.lock()
909 911 return l
910 912
911 913 def unlock():
912 914 self.store.write()
915 if self._dirtyphases:
916 phases.writeroots(self)
913 917 for k, ce in self._filecache.items():
914 918 if k == 'dirstate':
915 919 continue
916 920 ce.refresh()
917 921
918 922 l = self._lock(self.sjoin("lock"), wait, unlock,
919 923 self.invalidate, _('repository %s') % self.origroot)
920 924 self._lockref = weakref.ref(l)
921 925 return l
922 926
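# The unlock callback above is now the single persistence point for phase
# data: code that mutates phase roots only needs to flip the flag while
# holding the store lock (sketch of the intended idiom, not an excerpt
# from phases.py; `phase` and `newrootnode` are hypothetical):
#
#     l = repo.lock()
#     try:
#         repo._phaseroots[phase].add(newrootnode)  # phase in trackedphases
#         repo._dirtyphases = True                  # written out on unlock
#     finally:
#         l.release()                               # phases.writeroots(repo)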
923 927 def wlock(self, wait=True):
924 928 '''Lock the non-store parts of the repository (everything under
925 929 .hg except .hg/store) and return a weak reference to the lock.
926 930 Use this before modifying files in .hg.'''
927 931 l = self._wlockref and self._wlockref()
928 932 if l is not None and l.held:
929 933 l.lock()
930 934 return l
931 935
932 936 def unlock():
933 937 self.dirstate.write()
934 938 ce = self._filecache.get('dirstate')
935 939 if ce:
936 940 ce.refresh()
937 941
938 942 l = self._lock(self.join("wlock"), wait, unlock,
939 943 self.invalidatedirstate, _('working directory of %s') %
940 944 self.origroot)
941 945 self._wlockref = weakref.ref(l)
942 946 return l
943 947
944 948 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
945 949 """
946 950 commit an individual file as part of a larger transaction
947 951 """
948 952
949 953 fname = fctx.path()
950 954 text = fctx.data()
951 955 flog = self.file(fname)
952 956 fparent1 = manifest1.get(fname, nullid)
953 957 fparent2 = fparent2o = manifest2.get(fname, nullid)
954 958
955 959 meta = {}
956 960 copy = fctx.renamed()
957 961 if copy and copy[0] != fname:
958 962 # Mark the new revision of this file as a copy of another
959 963 # file. This copy data will effectively act as a parent
960 964 # of this new revision. If this is a merge, the first
961 965 # parent will be the nullid (meaning "look up the copy data")
962 966 # and the second one will be the other parent. For example:
963 967 #
964 968 # 0 --- 1 --- 3 rev1 changes file foo
965 969 # \ / rev2 renames foo to bar and changes it
966 970 # \- 2 -/ rev3 should have bar with all changes and
967 971 # should record that bar descends from
968 972 # bar in rev2 and foo in rev1
969 973 #
970 974 # this allows this merge to succeed:
971 975 #
972 976 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
973 977 # \ / merging rev3 and rev4 should use bar@rev2
974 978 # \- 2 --- 4 as the merge base
975 979 #
976 980
977 981 cfname = copy[0]
978 982 crev = manifest1.get(cfname)
979 983 newfparent = fparent2
980 984
981 985 if manifest2: # branch merge
982 986 if fparent2 == nullid or crev is None: # copied on remote side
983 987 if cfname in manifest2:
984 988 crev = manifest2[cfname]
985 989 newfparent = fparent1
986 990
987 991 # find source in nearest ancestor if we've lost track
988 992 if not crev:
989 993 self.ui.debug(" %s: searching for copy revision for %s\n" %
990 994 (fname, cfname))
991 995 for ancestor in self[None].ancestors():
992 996 if cfname in ancestor:
993 997 crev = ancestor[cfname].filenode()
994 998 break
995 999
996 1000 if crev:
997 1001 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
998 1002 meta["copy"] = cfname
999 1003 meta["copyrev"] = hex(crev)
1000 1004 fparent1, fparent2 = nullid, newfparent
1001 1005 else:
1002 1006 self.ui.warn(_("warning: can't find ancestor for '%s' "
1003 1007 "copied from '%s'!\n") % (fname, cfname))
1004 1008
1005 1009 elif fparent2 != nullid:
1006 1010 # is one parent an ancestor of the other?
1007 1011 fparentancestor = flog.ancestor(fparent1, fparent2)
1008 1012 if fparentancestor == fparent1:
1009 1013 fparent1, fparent2 = fparent2, nullid
1010 1014 elif fparentancestor == fparent2:
1011 1015 fparent2 = nullid
1012 1016
1013 1017 # is the file changed?
1014 1018 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1015 1019 changelist.append(fname)
1016 1020 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1017 1021
1018 1022 # are just the flags changed during merge?
1019 1023 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1020 1024 changelist.append(fname)
1021 1025
1022 1026 return fparent1
1023 1027
1024 1028 def commit(self, text="", user=None, date=None, match=None, force=False,
1025 1029 editor=False, extra={}):
1026 1030 """Add a new revision to current repository.
1027 1031
1028 1032 Revision information is gathered from the working directory,
1029 1033 match can be used to filter the committed files. If editor is
1030 1034 supplied, it is called to get a commit message.
1031 1035 """
1032 1036
1033 1037 def fail(f, msg):
1034 1038 raise util.Abort('%s: %s' % (f, msg))
1035 1039
1036 1040 if not match:
1037 1041 match = matchmod.always(self.root, '')
1038 1042
1039 1043 if not force:
1040 1044 vdirs = []
1041 1045 match.dir = vdirs.append
1042 1046 match.bad = fail
1043 1047
1044 1048 wlock = self.wlock()
1045 1049 try:
1046 1050 wctx = self[None]
1047 1051 merge = len(wctx.parents()) > 1
1048 1052
1049 1053 if (not force and merge and match and
1050 1054 (match.files() or match.anypats())):
1051 1055 raise util.Abort(_('cannot partially commit a merge '
1052 1056 '(do not specify files or patterns)'))
1053 1057
1054 1058 changes = self.status(match=match, clean=force)
1055 1059 if force:
1056 1060 changes[0].extend(changes[6]) # mq may commit unchanged files
1057 1061
1058 1062 # check subrepos
1059 1063 subs = []
1060 1064 removedsubs = set()
1061 1065 if '.hgsub' in wctx:
1062 1066 # only manage subrepos and .hgsubstate if .hgsub is present
1063 1067 for p in wctx.parents():
1064 1068 removedsubs.update(s for s in p.substate if match(s))
1065 1069 for s in wctx.substate:
1066 1070 removedsubs.discard(s)
1067 1071 if match(s) and wctx.sub(s).dirty():
1068 1072 subs.append(s)
1069 1073 if (subs or removedsubs):
1070 1074 if (not match('.hgsub') and
1071 1075 '.hgsub' in (wctx.modified() + wctx.added())):
1072 1076 raise util.Abort(
1073 1077 _("can't commit subrepos without .hgsub"))
1074 1078 if '.hgsubstate' not in changes[0]:
1075 1079 changes[0].insert(0, '.hgsubstate')
1076 1080 if '.hgsubstate' in changes[2]:
1077 1081 changes[2].remove('.hgsubstate')
1078 1082 elif '.hgsub' in changes[2]:
1079 1083 # clean up .hgsubstate when .hgsub is removed
1080 1084 if ('.hgsubstate' in wctx and
1081 1085 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1082 1086 changes[2].insert(0, '.hgsubstate')
1083 1087
1084 1088 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1085 1089 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1086 1090 if changedsubs:
1087 1091 raise util.Abort(_("uncommitted changes in subrepo %s")
1088 1092 % changedsubs[0],
1089 1093 hint=_("use --subrepos for recursive commit"))
1090 1094
1091 1095 # make sure all explicit patterns are matched
1092 1096 if not force and match.files():
1093 1097 matched = set(changes[0] + changes[1] + changes[2])
1094 1098
1095 1099 for f in match.files():
1096 1100 if f == '.' or f in matched or f in wctx.substate:
1097 1101 continue
1098 1102 if f in changes[3]: # missing
1099 1103 fail(f, _('file not found!'))
1100 1104 if f in vdirs: # visited directory
1101 1105 d = f + '/'
1102 1106 for mf in matched:
1103 1107 if mf.startswith(d):
1104 1108 break
1105 1109 else:
1106 1110 fail(f, _("no match under directory!"))
1107 1111 elif f not in self.dirstate:
1108 1112 fail(f, _("file not tracked!"))
1109 1113
1110 1114 if (not force and not extra.get("close") and not merge
1111 1115 and not (changes[0] or changes[1] or changes[2])
1112 1116 and wctx.branch() == wctx.p1().branch()):
1113 1117 return None
1114 1118
1115 1119 ms = mergemod.mergestate(self)
1116 1120 for f in changes[0]:
1117 1121 if f in ms and ms[f] == 'u':
1118 1122 raise util.Abort(_("unresolved merge conflicts "
1119 1123 "(see hg help resolve)"))
1120 1124
1121 1125 cctx = context.workingctx(self, text, user, date, extra, changes)
1122 1126 if editor:
1123 1127 cctx._text = editor(self, cctx, subs)
1124 1128 edited = (text != cctx._text)
1125 1129
1126 1130 # commit subs
1127 1131 if subs or removedsubs:
1128 1132 state = wctx.substate.copy()
1129 1133 for s in sorted(subs):
1130 1134 sub = wctx.sub(s)
1131 1135 self.ui.status(_('committing subrepository %s\n') %
1132 1136 subrepo.subrelpath(sub))
1133 1137 sr = sub.commit(cctx._text, user, date)
1134 1138 state[s] = (state[s][0], sr)
1135 1139 subrepo.writestate(self, state)
1136 1140
1137 1141 # Save commit message in case this transaction gets rolled back
1138 1142 # (e.g. by a pretxncommit hook). Leave the content alone on
1139 1143 # the assumption that the user will use the same editor again.
1140 1144 msgfn = self.savecommitmessage(cctx._text)
1141 1145
1142 1146 p1, p2 = self.dirstate.parents()
1143 1147 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1144 1148 try:
1145 1149 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1146 1150 ret = self.commitctx(cctx, True)
1147 1151 except:
1148 1152 if edited:
1149 1153 self.ui.write(
1150 1154 _('note: commit message saved in %s\n') % msgfn)
1151 1155 raise
1152 1156
1153 1157 # update bookmarks, dirstate and mergestate
1154 1158 bookmarks.update(self, p1, ret)
1155 1159 for f in changes[0] + changes[1]:
1156 1160 self.dirstate.normal(f)
1157 1161 for f in changes[2]:
1158 1162 self.dirstate.drop(f)
1159 1163 self.dirstate.setparents(ret)
1160 1164 ms.reset()
1161 1165 finally:
1162 1166 wlock.release()
1163 1167
1164 1168 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1165 1169 return ret
1166 1170
1167 1171 def commitctx(self, ctx, error=False):
1168 1172 """Add a new revision to current repository.
1169 1173 Revision information is passed via the context argument.
1170 1174 """
1171 1175
1172 1176 tr = lock = None
1173 1177 removed = list(ctx.removed())
1174 1178 p1, p2 = ctx.p1(), ctx.p2()
1175 1179 user = ctx.user()
1176 1180
1177 1181 lock = self.lock()
1178 1182 try:
1179 1183 tr = self.transaction("commit")
1180 1184 trp = weakref.proxy(tr)
1181 1185
1182 1186 if ctx.files():
1183 1187 m1 = p1.manifest().copy()
1184 1188 m2 = p2.manifest()
1185 1189
1186 1190 # check in files
1187 1191 new = {}
1188 1192 changed = []
1189 1193 linkrev = len(self)
1190 1194 for f in sorted(ctx.modified() + ctx.added()):
1191 1195 self.ui.note(f + "\n")
1192 1196 try:
1193 1197 fctx = ctx[f]
1194 1198 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1195 1199 changed)
1196 1200 m1.set(f, fctx.flags())
1197 1201 except OSError, inst:
1198 1202 self.ui.warn(_("trouble committing %s!\n") % f)
1199 1203 raise
1200 1204 except IOError, inst:
1201 1205 errcode = getattr(inst, 'errno', errno.ENOENT)
1202 1206 if error or errcode and errcode != errno.ENOENT:
1203 1207 self.ui.warn(_("trouble committing %s!\n") % f)
1204 1208 raise
1205 1209 else:
1206 1210 removed.append(f)
1207 1211
1208 1212 # update manifest
1209 1213 m1.update(new)
1210 1214 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1211 1215 drop = [f for f in removed if f in m1]
1212 1216 for f in drop:
1213 1217 del m1[f]
1214 1218 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1215 1219 p2.manifestnode(), (new, drop))
1216 1220 files = changed + removed
1217 1221 else:
1218 1222 mn = p1.manifestnode()
1219 1223 files = []
1220 1224
1221 1225 # update changelog
1222 1226 self.changelog.delayupdate()
1223 1227 n = self.changelog.add(mn, files, ctx.description(),
1224 1228 trp, p1.node(), p2.node(),
1225 1229 user, ctx.date(), ctx.extra().copy())
1226 1230 p = lambda: self.changelog.writepending() and self.root or ""
1227 1231 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1228 1232 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1229 1233 parent2=xp2, pending=p)
1230 1234 self.changelog.finalize(trp)
1231 1235 tr.close()
1232 1236
1233 1237 if self._branchcache:
1234 1238 self.updatebranchcache()
1235 1239 return n
1236 1240 finally:
1237 1241 if tr:
1238 1242 tr.release()
1239 1243 lock.release()
1240 1244
1241 1245 def destroyed(self):
1242 1246 '''Inform the repository that nodes have been destroyed.
1243 1247 Intended for use by strip and rollback, so there's a common
1244 1248 place for anything that has to be done after destroying history.'''
1245 1249 # XXX it might be nice if we could take the list of destroyed
1246 1250 # nodes, but I don't see an easy way for rollback() to do that
1247 1251
1248 1252 # Ensure the persistent tag cache is updated. Doing it now
1249 1253 # means that the tag cache only has to worry about destroyed
1250 1254 # heads immediately after a strip/rollback. That in turn
1251 1255 # guarantees that "cachetip == currenttip" (comparing both rev
1252 1256 # and node) always means no nodes have been added or destroyed.
1253 1257
1254 1258 # XXX this is suboptimal when qrefresh'ing: we strip the current
1255 1259 # head, refresh the tag cache, then immediately add a new head.
1256 1260 # But I think doing it this way is necessary for the "instant
1257 1261 # tag cache retrieval" case to work.
1258 1262 self.invalidatecaches()
1259 1263
1260 1264 def walk(self, match, node=None):
1261 1265 '''
1262 1266 walk recursively through the directory tree or a given
1263 1267 changeset, finding all files matched by the match
1264 1268 function
1265 1269 '''
1266 1270 return self[node].walk(match)
1267 1271
1268 1272 def status(self, node1='.', node2=None, match=None,
1269 1273 ignored=False, clean=False, unknown=False,
1270 1274 listsubrepos=False):
1271 1275 """return status of files between two nodes or node and working directory
1272 1276
1273 1277 If node1 is None, use the first dirstate parent instead.
1274 1278 If node2 is None, compare node1 with working directory.
1275 1279 """
1276 1280
1277 1281 def mfmatches(ctx):
1278 1282 mf = ctx.manifest().copy()
1279 1283 for fn in mf.keys():
1280 1284 if not match(fn):
1281 1285 del mf[fn]
1282 1286 return mf
1283 1287
1284 1288 if isinstance(node1, context.changectx):
1285 1289 ctx1 = node1
1286 1290 else:
1287 1291 ctx1 = self[node1]
1288 1292 if isinstance(node2, context.changectx):
1289 1293 ctx2 = node2
1290 1294 else:
1291 1295 ctx2 = self[node2]
1292 1296
1293 1297 working = ctx2.rev() is None
1294 1298 parentworking = working and ctx1 == self['.']
1295 1299 match = match or matchmod.always(self.root, self.getcwd())
1296 1300 listignored, listclean, listunknown = ignored, clean, unknown
1297 1301
1298 1302 # load earliest manifest first for caching reasons
1299 1303 if not working and ctx2.rev() < ctx1.rev():
1300 1304 ctx2.manifest()
1301 1305
1302 1306 if not parentworking:
1303 1307 def bad(f, msg):
1304 1308 if f not in ctx1:
1305 1309 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1306 1310 match.bad = bad
1307 1311
1308 1312 if working: # we need to scan the working dir
1309 1313 subrepos = []
1310 1314 if '.hgsub' in self.dirstate:
1311 1315 subrepos = ctx2.substate.keys()
1312 1316 s = self.dirstate.status(match, subrepos, listignored,
1313 1317 listclean, listunknown)
1314 1318 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1315 1319
1316 1320 # check for any possibly clean files
1317 1321 if parentworking and cmp:
1318 1322 fixup = []
1319 1323 # do a full compare of any files that might have changed
1320 1324 for f in sorted(cmp):
1321 1325 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1322 1326 or ctx1[f].cmp(ctx2[f])):
1323 1327 modified.append(f)
1324 1328 else:
1325 1329 fixup.append(f)
1326 1330
1327 1331 # update dirstate for files that are actually clean
1328 1332 if fixup:
1329 1333 if listclean:
1330 1334 clean += fixup
1331 1335
1332 1336 try:
1333 1337 # updating the dirstate is optional
1334 1338 # so we don't wait on the lock
1335 1339 wlock = self.wlock(False)
1336 1340 try:
1337 1341 for f in fixup:
1338 1342 self.dirstate.normal(f)
1339 1343 finally:
1340 1344 wlock.release()
1341 1345 except error.LockError:
1342 1346 pass
1343 1347
1344 1348 if not parentworking:
1345 1349 mf1 = mfmatches(ctx1)
1346 1350 if working:
1347 1351 # we are comparing working dir against non-parent
1348 1352 # generate a pseudo-manifest for the working dir
1349 1353 mf2 = mfmatches(self['.'])
1350 1354 for f in cmp + modified + added:
1351 1355 mf2[f] = None
1352 1356 mf2.set(f, ctx2.flags(f))
1353 1357 for f in removed:
1354 1358 if f in mf2:
1355 1359 del mf2[f]
1356 1360 else:
1357 1361 # we are comparing two revisions
1358 1362 deleted, unknown, ignored = [], [], []
1359 1363 mf2 = mfmatches(ctx2)
1360 1364
1361 1365 modified, added, clean = [], [], []
1362 1366 for fn in mf2:
1363 1367 if fn in mf1:
1364 1368 if (fn not in deleted and
1365 1369 (mf1.flags(fn) != mf2.flags(fn) or
1366 1370 (mf1[fn] != mf2[fn] and
1367 1371 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1368 1372 modified.append(fn)
1369 1373 elif listclean:
1370 1374 clean.append(fn)
1371 1375 del mf1[fn]
1372 1376 elif fn not in deleted:
1373 1377 added.append(fn)
1374 1378 removed = mf1.keys()
1375 1379
1376 1380 if working and modified and not self.dirstate._checklink:
1377 1381 # Symlink placeholders may get non-symlink-like contents
1378 1382 # via user error or dereferencing by NFS or Samba servers,
1379 1383 # so we filter out any placeholders that don't look like a
1380 1384 # symlink
1381 1385 sane = []
1382 1386 for f in modified:
1383 1387 if ctx2.flags(f) == 'l':
1384 1388 d = ctx2[f].data()
1385 1389 if len(d) >= 1024 or '\n' in d or util.binary(d):
1386 1390 self.ui.debug('ignoring suspect symlink placeholder'
1387 1391 ' "%s"\n' % f)
1388 1392 continue
1389 1393 sane.append(f)
1390 1394 modified = sane
1391 1395
1392 1396 r = modified, added, removed, deleted, unknown, ignored, clean
1393 1397
1394 1398 if listsubrepos:
1395 1399 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1396 1400 if working:
1397 1401 rev2 = None
1398 1402 else:
1399 1403 rev2 = ctx2.substate[subpath][1]
1400 1404 try:
1401 1405 submatch = matchmod.narrowmatcher(subpath, match)
1402 1406 s = sub.status(rev2, match=submatch, ignored=listignored,
1403 1407 clean=listclean, unknown=listunknown,
1404 1408 listsubrepos=True)
1405 1409 for rfiles, sfiles in zip(r, s):
1406 1410 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1407 1411 except error.LookupError:
1408 1412 self.ui.status(_("skipping missing subrepository: %s\n")
1409 1413 % subpath)
1410 1414
1411 1415 for l in r:
1412 1416 l.sort()
1413 1417 return r
1414 1418
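# The 7-tuple returned above unpacks in this fixed order (see the final
# assignment to r; the ignored/clean/unknown lists stay empty unless
# requested):
#
#     modified, added, removed, deleted, unknown, ignored, clean = \
#         repo.status(ignored=True, clean=True, unknown=True)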
1415 1419 def heads(self, start=None):
1416 1420 heads = self.changelog.heads(start)
1417 1421 # sort the output in rev descending order
1418 1422 return sorted(heads, key=self.changelog.rev, reverse=True)
1419 1423
1420 1424 def branchheads(self, branch=None, start=None, closed=False):
1421 1425 '''return a (possibly filtered) list of heads for the given branch
1422 1426
1423 1427 Heads are returned in topological order, from newest to oldest.
1424 1428 If branch is None, use the dirstate branch.
1425 1429 If start is not None, return only heads reachable from start.
1426 1430 If closed is True, return heads that are marked as closed as well.
1427 1431 '''
1428 1432 if branch is None:
1429 1433 branch = self[None].branch()
1430 1434 branches = self.branchmap()
1431 1435 if branch not in branches:
1432 1436 return []
1433 1437 # the cache returns heads ordered lowest to highest
1434 1438 bheads = list(reversed(branches[branch]))
1435 1439 if start is not None:
1436 1440 # filter out the heads that cannot be reached from startrev
1437 1441 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1438 1442 bheads = [h for h in bheads if h in fbheads]
1439 1443 if not closed:
1440 1444 bheads = [h for h in bheads if
1441 1445 ('close' not in self.changelog.read(h)[5])]
1442 1446 return bheads
1443 1447
1444 1448 def branches(self, nodes):
1445 1449 if not nodes:
1446 1450 nodes = [self.changelog.tip()]
1447 1451 b = []
1448 1452 for n in nodes:
1449 1453 t = n
1450 1454 while True:
1451 1455 p = self.changelog.parents(n)
1452 1456 if p[1] != nullid or p[0] == nullid:
1453 1457 b.append((t, n, p[0], p[1]))
1454 1458 break
1455 1459 n = p[0]
1456 1460 return b
1457 1461
1458 1462 def between(self, pairs):
1459 1463 r = []
1460 1464
1461 1465 for top, bottom in pairs:
1462 1466 n, l, i = top, [], 0
1463 1467 f = 1
1464 1468
1465 1469 while n != bottom and n != nullid:
1466 1470 p = self.changelog.parents(n)[0]
1467 1471 if i == f:
1468 1472 l.append(n)
1469 1473 f = f * 2
1470 1474 n = p
1471 1475 i += 1
1472 1476
1473 1477 r.append(l)
1474 1478
1475 1479 return r
1476 1480
1477 1481 def pull(self, remote, heads=None, force=False):
1478 1482 lock = self.lock()
1479 1483 try:
1480 1484 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1481 1485 force=force)
1482 1486 common, fetch, rheads = tmp
1483 1487 if not fetch:
1484 1488 self.ui.status(_("no changes found\n"))
1485 1489 result = 0
1486 1490 else:
1487 1491 if heads is None and list(common) == [nullid]:
1488 1492 self.ui.status(_("requesting all changes\n"))
1489 1493 elif heads is None and remote.capable('changegroupsubset'):
1490 1494 # issue1320, avoid a race if remote changed after discovery
1491 1495 heads = rheads
1492 1496
1493 1497 if remote.capable('getbundle'):
1494 1498 cg = remote.getbundle('pull', common=common,
1495 1499 heads=heads or rheads)
1496 1500 elif heads is None:
1497 1501 cg = remote.changegroup(fetch, 'pull')
1498 1502 elif not remote.capable('changegroupsubset'):
1499 1503 raise util.Abort(_("partial pull cannot be done because "
1500 1504 "other repository doesn't support "
1501 1505 "changegroupsubset."))
1502 1506 else:
1503 1507 cg = remote.changegroupsubset(fetch, heads, 'pull')
1504 1508 result = self.addchangegroup(cg, 'pull', remote.url(),
1505 1509 lock=lock)
1506 1510 finally:
1507 1511 lock.release()
1508 1512
1509 1513 return result
1510 1514
1511 1515 def checkpush(self, force, revs):
1512 1516 """Extensions can override this function if additional checks have
1513 1517 to be performed before pushing, or call it if they override push
1514 1518 command.
1515 1519 """
1516 1520 pass
1517 1521
1518 1522 def push(self, remote, force=False, revs=None, newbranch=False):
1519 1523 '''Push outgoing changesets (limited by revs) from the current
1520 1524 repository to remote. Return an integer:
1521 1525 - 0 means HTTP error *or* nothing to push
1522 1526 - 1 means we pushed and remote head count is unchanged *or*
1523 1527 we have outgoing changesets but refused to push
1524 1528 - other values as described by addchangegroup()
1525 1529 '''
1526 1530 # there are two ways to push to remote repo:
1527 1531 #
1528 1532 # addchangegroup assumes local user can lock remote
1529 1533 # repo (local filesystem, old ssh servers).
1530 1534 #
1531 1535 # unbundle assumes local user cannot lock remote repo (new ssh
1532 1536 # servers, http servers).
1533 1537
1534 1538 self.checkpush(force, revs)
1535 1539 lock = None
1536 1540 unbundle = remote.capable('unbundle')
1537 1541 if not unbundle:
1538 1542 lock = remote.lock()
1539 1543 try:
1540 1544 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1541 1545 newbranch)
1542 1546 ret = remote_heads
1543 1547 if cg is not None:
1544 1548 if unbundle:
1545 1549 # local repo finds heads on server, finds out what
1546 1550 # revs it must push. once revs transferred, if server
1547 1551 # finds it has different heads (someone else won
1548 1552 # commit/push race), server aborts.
1549 1553 if force:
1550 1554 remote_heads = ['force']
1551 1555 # ssh: return remote's addchangegroup()
1552 1556 # http: return remote's addchangegroup() or 0 for error
1553 1557 ret = remote.unbundle(cg, remote_heads, 'push')
1554 1558 else:
1555 1559 # we return an integer indicating remote head count change
1556 1560 ret = remote.addchangegroup(cg, 'push', self.url(),
1557 1561 lock=lock)
1558 1562 finally:
1559 1563 if lock is not None:
1560 1564 lock.release()
1561 1565
1562 1566 self.ui.debug("checking for updated bookmarks\n")
1563 1567 rb = remote.listkeys('bookmarks')
1564 1568 for k in rb.keys():
1565 1569 if k in self._bookmarks:
1566 1570 nr, nl = rb[k], hex(self._bookmarks[k])
1567 1571 if nr in self:
1568 1572 cr = self[nr]
1569 1573 cl = self[nl]
1570 1574 if cl in cr.descendants():
1571 1575 r = remote.pushkey('bookmarks', k, nr, nl)
1572 1576 if r:
1573 1577 self.ui.status(_("updating bookmark %s\n") % k)
1574 1578 else:
1575 1579 self.ui.warn(_('updating bookmark %s'
1576 1580 ' failed!\n') % k)
1577 1581
1578 1582 return ret
1579 1583
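# Interpreting push()'s result per the docstring above (sketch; `other`
# stands for any peer repository object):
#
#     ret = repo.push(other)
#     if ret == 0:
#         pass  # HTTP error, or nothing to push
#     elif ret == 1:
#         pass  # pushed with remote head count unchanged, or push refused
#     else:
#         pass  # addchangegroup() result: change in remote head count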
1580 1584 def changegroupinfo(self, nodes, source):
1581 1585 if self.ui.verbose or source == 'bundle':
1582 1586 self.ui.status(_("%d changesets found\n") % len(nodes))
1583 1587 if self.ui.debugflag:
1584 1588 self.ui.debug("list of changesets:\n")
1585 1589 for node in nodes:
1586 1590 self.ui.debug("%s\n" % hex(node))
1587 1591
1588 1592 def changegroupsubset(self, bases, heads, source):
1589 1593 """Compute a changegroup consisting of all the nodes that are
1590 1594 descendants of any of the bases and ancestors of any of the heads.
1591 1595 Return a chunkbuffer object whose read() method will return
1592 1596 successive changegroup chunks.
1593 1597
1594 1598 It is fairly complex as determining which filenodes and which
1595 1599 manifest nodes need to be included for the changeset to be complete
1596 1600 is non-trivial.
1597 1601
1598 1602 Another wrinkle is doing the reverse, figuring out which changeset in
1599 1603 the changegroup a particular filenode or manifestnode belongs to.
1600 1604 """
1601 1605 cl = self.changelog
1602 1606 if not bases:
1603 1607 bases = [nullid]
1604 1608 csets, bases, heads = cl.nodesbetween(bases, heads)
1605 1609 # We assume that all ancestors of bases are known
1606 1610 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1607 1611 return self._changegroupsubset(common, csets, heads, source)
1608 1612
1609 1613 def getbundle(self, source, heads=None, common=None):
1610 1614 """Like changegroupsubset, but returns the set difference between the
1611 1615 ancestors of heads and the ancestors of common.
1612 1616
1613 1617 If heads is None, use the local heads. If common is None, use [nullid].
1614 1618
1615 1619 The nodes in common might not all be known locally due to the way the
1616 1620 current discovery protocol works.
1617 1621 """
1618 1622 cl = self.changelog
1619 1623 if common:
1620 1624 nm = cl.nodemap
1621 1625 common = [n for n in common if n in nm]
1622 1626 else:
1623 1627 common = [nullid]
1624 1628 if not heads:
1625 1629 heads = cl.heads()
1626 1630 common, missing = cl.findcommonmissing(common, heads)
1627 1631 if not missing:
1628 1632 return None
1629 1633 return self._changegroupsubset(common, missing, heads, source)
1630 1634
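The contract of getbundle() above is plain set arithmetic: everything reachable from heads minus everything reachable from common. A toy sketch, with a dict-based DAG standing in for the real changelog (all names and values invented):

    # Toy DAG: node -> list of parents.
    dag = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['b']}

    def ancestors(nodes):
        # includes the nodes themselves, i.e. '::x' in revset terms
        seen, stack = set(), list(nodes)
        while stack:
            n = stack.pop()
            if n not in seen:
                seen.add(n)
                stack.extend(dag[n])
        return seen

    missing = ancestors(['c', 'd']) - ancestors(['b'])
    print sorted(missing)  # ['c', 'd'], what the bundle would carry
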
1631 1635 def _changegroupsubset(self, commonrevs, csets, heads, source):
1632 1636
1633 1637 cl = self.changelog
1634 1638 mf = self.manifest
1635 1639 mfs = {} # needed manifests
1636 1640 fnodes = {} # needed file nodes
1637 1641 changedfiles = set()
1638 1642 fstate = ['', {}]
1639 1643 count = [0]
1640 1644
1641 1645 # can we go through the fast path?
1642 1646 heads.sort()
1643 1647 if heads == sorted(self.heads()):
1644 1648 return self._changegroup(csets, source)
1645 1649
1646 1650 # slow path
1647 1651 self.hook('preoutgoing', throw=True, source=source)
1648 1652 self.changegroupinfo(csets, source)
1649 1653
1650 1654 # filter any nodes that claim to be part of the known set
1651 1655 def prune(revlog, missing):
1652 1656 return [n for n in missing
1653 1657 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1654 1658
1655 1659 def lookup(revlog, x):
1656 1660 if revlog == cl:
1657 1661 c = cl.read(x)
1658 1662 changedfiles.update(c[3])
1659 1663 mfs.setdefault(c[0], x)
1660 1664 count[0] += 1
1661 1665 self.ui.progress(_('bundling'), count[0],
1662 1666 unit=_('changesets'), total=len(csets))
1663 1667 return x
1664 1668 elif revlog == mf:
1665 1669 clnode = mfs[x]
1666 1670 mdata = mf.readfast(x)
1667 1671 for f in changedfiles:
1668 1672 if f in mdata:
1669 1673 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1670 1674 count[0] += 1
1671 1675 self.ui.progress(_('bundling'), count[0],
1672 1676 unit=_('manifests'), total=len(mfs))
1673 1677 return mfs[x]
1674 1678 else:
1675 1679 self.ui.progress(
1676 1680 _('bundling'), count[0], item=fstate[0],
1677 1681 unit=_('files'), total=len(changedfiles))
1678 1682 return fstate[1][x]
1679 1683
1680 1684 bundler = changegroup.bundle10(lookup)
1681 1685 reorder = self.ui.config('bundle', 'reorder', 'auto')
1682 1686 if reorder == 'auto':
1683 1687 reorder = None
1684 1688 else:
1685 1689 reorder = util.parsebool(reorder)
1686 1690
1687 1691 def gengroup():
1688 1692 # Create a changenode group generator that will call our functions
1689 1693 # back to lookup the owning changenode and collect information.
1690 1694 for chunk in cl.group(csets, bundler, reorder=reorder):
1691 1695 yield chunk
1692 1696 self.ui.progress(_('bundling'), None)
1693 1697
1694 1698 # Create a generator for the manifestnodes that calls our lookup
1695 1699 # and data collection functions back.
1696 1700 count[0] = 0
1697 1701 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1698 1702 yield chunk
1699 1703 self.ui.progress(_('bundling'), None)
1700 1704
1701 1705 mfs.clear()
1702 1706
1703 1707 # Go through all our files in order sorted by name.
1704 1708 count[0] = 0
1705 1709 for fname in sorted(changedfiles):
1706 1710 filerevlog = self.file(fname)
1707 1711 if not len(filerevlog):
1708 1712 raise util.Abort(_("empty or missing revlog for %s") % fname)
1709 1713 fstate[0] = fname
1710 1714 fstate[1] = fnodes.pop(fname, {})
1711 1715
1712 1716 nodelist = prune(filerevlog, fstate[1])
1713 1717 if nodelist:
1714 1718 count[0] += 1
1715 1719 yield bundler.fileheader(fname)
1716 1720 for chunk in filerevlog.group(nodelist, bundler, reorder):
1717 1721 yield chunk
1718 1722
1719 1723 # Signal that no more groups are left.
1720 1724 yield bundler.close()
1721 1725 self.ui.progress(_('bundling'), None)
1722 1726
1723 1727 if csets:
1724 1728 self.hook('outgoing', node=hex(csets[0]), source=source)
1725 1729
1726 1730 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1727 1731
1728 1732 def changegroup(self, basenodes, source):
1729 1733 # to avoid a race we use changegroupsubset() (issue1320)
1730 1734 return self.changegroupsubset(basenodes, self.heads(), source)
1731 1735
1732 1736 def _changegroup(self, nodes, source):
1733 1737 """Compute the changegroup of all nodes that we have that a recipient
1734 1738 doesn't. Return a chunkbuffer object whose read() method will return
1735 1739 successive changegroup chunks.
1736 1740
1737 1741 This is much easier than the previous function as we can assume that
1738 1742 the recipient has any changenode we aren't sending them.
1739 1743
1740 1744 nodes is the set of nodes to send"""
1741 1745
1742 1746 cl = self.changelog
1743 1747 mf = self.manifest
1744 1748 mfs = {}
1745 1749 changedfiles = set()
1746 1750 fstate = ['']
1747 1751 count = [0]
1748 1752
1749 1753 self.hook('preoutgoing', throw=True, source=source)
1750 1754 self.changegroupinfo(nodes, source)
1751 1755
1752 1756 revset = set([cl.rev(n) for n in nodes])
1753 1757
1754 1758 def gennodelst(log):
1755 1759 return [log.node(r) for r in log if log.linkrev(r) in revset]
1756 1760
1757 1761 def lookup(revlog, x):
1758 1762 if revlog == cl:
1759 1763 c = cl.read(x)
1760 1764 changedfiles.update(c[3])
1761 1765 mfs.setdefault(c[0], x)
1762 1766 count[0] += 1
1763 1767 self.ui.progress(_('bundling'), count[0],
1764 1768 unit=_('changesets'), total=len(nodes))
1765 1769 return x
1766 1770 elif revlog == mf:
1767 1771 count[0] += 1
1768 1772 self.ui.progress(_('bundling'), count[0],
1769 1773 unit=_('manifests'), total=len(mfs))
1770 1774 return cl.node(revlog.linkrev(revlog.rev(x)))
1771 1775 else:
1772 1776 self.ui.progress(
1773 1777 _('bundling'), count[0], item=fstate[0],
1774 1778 total=len(changedfiles), unit=_('files'))
1775 1779 return cl.node(revlog.linkrev(revlog.rev(x)))
1776 1780
1777 1781 bundler = changegroup.bundle10(lookup)
1778 1782 reorder = self.ui.config('bundle', 'reorder', 'auto')
1779 1783 if reorder == 'auto':
1780 1784 reorder = None
1781 1785 else:
1782 1786 reorder = util.parsebool(reorder)
1783 1787
1784 1788 def gengroup():
1785 1789 '''yield a sequence of changegroup chunks (strings)'''
1786 1790 # construct a list of all changed files
1787 1791
1788 1792 for chunk in cl.group(nodes, bundler, reorder=reorder):
1789 1793 yield chunk
1790 1794 self.ui.progress(_('bundling'), None)
1791 1795
1792 1796 count[0] = 0
1793 1797 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1794 1798 yield chunk
1795 1799 self.ui.progress(_('bundling'), None)
1796 1800
1797 1801 count[0] = 0
1798 1802 for fname in sorted(changedfiles):
1799 1803 filerevlog = self.file(fname)
1800 1804 if not len(filerevlog):
1801 1805 raise util.Abort(_("empty or missing revlog for %s") % fname)
1802 1806 fstate[0] = fname
1803 1807 nodelist = gennodelst(filerevlog)
1804 1808 if nodelist:
1805 1809 count[0] += 1
1806 1810 yield bundler.fileheader(fname)
1807 1811 for chunk in filerevlog.group(nodelist, bundler, reorder):
1808 1812 yield chunk
1809 1813 yield bundler.close()
1810 1814 self.ui.progress(_('bundling'), None)
1811 1815
1812 1816 if nodes:
1813 1817 self.hook('outgoing', node=hex(nodes[0]), source=source)
1814 1818
1815 1819 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1816 1820
1817 1821 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1818 1822 """Add the changegroup returned by source.read() to this repo.
1819 1823 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1820 1824 the URL of the repo where this changegroup is coming from.
1821 1825 If lock is not None, the function takes ownership of the lock
1822 1826 and releases it after the changegroup is added.
1823 1827
1824 1828 Return an integer summarizing the change to this repo:
1825 1829 - nothing changed or no source: 0
1826 1830 - more heads than before: 1+added heads (2..n)
1827 1831 - fewer heads than before: -1-removed heads (-2..-n)
1828 1832 - number of heads stays the same: 1
1829 1833 """
1830 1834 def csmap(x):
1831 1835 self.ui.debug("add changeset %s\n" % short(x))
1832 1836 return len(cl)
1833 1837
1834 1838 def revmap(x):
1835 1839 return cl.rev(x)
1836 1840
1837 1841 if not source:
1838 1842 return 0
1839 1843
1840 1844 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1841 1845
1842 1846 changesets = files = revisions = 0
1843 1847 efiles = set()
1844 1848
1845 1849 # write changelog data to temp files so concurrent readers will not see
1846 1850 # inconsistent view
1847 1851 cl = self.changelog
1848 1852 cl.delayupdate()
1849 1853 oldheads = cl.heads()
1850 1854
1851 1855 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1852 1856 try:
1853 1857 trp = weakref.proxy(tr)
1854 1858 # pull off the changeset group
1855 1859 self.ui.status(_("adding changesets\n"))
1856 1860 clstart = len(cl)
1857 1861 class prog(object):
1858 1862 step = _('changesets')
1859 1863 count = 1
1860 1864 ui = self.ui
1861 1865 total = None
1862 1866 def __call__(self):
1863 1867 self.ui.progress(self.step, self.count, unit=_('chunks'),
1864 1868 total=self.total)
1865 1869 self.count += 1
1866 1870 pr = prog()
1867 1871 source.callback = pr
1868 1872
1869 1873 source.changelogheader()
1870 1874 if (cl.addgroup(source, csmap, trp) is None
1871 1875 and not emptyok):
1872 1876 raise util.Abort(_("received changelog group is empty"))
1873 1877 clend = len(cl)
1874 1878 changesets = clend - clstart
1875 1879 for c in xrange(clstart, clend):
1876 1880 efiles.update(self[c].files())
1877 1881 efiles = len(efiles)
1878 1882 self.ui.progress(_('changesets'), None)
1879 1883
1880 1884 # pull off the manifest group
1881 1885 self.ui.status(_("adding manifests\n"))
1882 1886 pr.step = _('manifests')
1883 1887 pr.count = 1
1884 1888 pr.total = changesets # manifests <= changesets
1885 1889 # no need to check for empty manifest group here:
1886 1890 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1887 1891 # no new manifest will be created and the manifest group will
1888 1892 # be empty during the pull
1889 1893 source.manifestheader()
1890 1894 self.manifest.addgroup(source, revmap, trp)
1891 1895 self.ui.progress(_('manifests'), None)
1892 1896
1893 1897 needfiles = {}
1894 1898 if self.ui.configbool('server', 'validate', default=False):
1895 1899 # validate incoming csets have their manifests
1896 1900 for cset in xrange(clstart, clend):
1897 1901 mfest = self.changelog.read(self.changelog.node(cset))[0]
1898 1902 mfest = self.manifest.readdelta(mfest)
1899 1903 # store file nodes we must see
1900 1904 for f, n in mfest.iteritems():
1901 1905 needfiles.setdefault(f, set()).add(n)
1902 1906
1903 1907 # process the files
1904 1908 self.ui.status(_("adding file changes\n"))
1905 1909 pr.step = _('files')
1906 1910 pr.count = 1
1907 1911 pr.total = efiles
1908 1912 source.callback = None
1909 1913
1910 1914 while True:
1911 1915 chunkdata = source.filelogheader()
1912 1916 if not chunkdata:
1913 1917 break
1914 1918 f = chunkdata["filename"]
1915 1919 self.ui.debug("adding %s revisions\n" % f)
1916 1920 pr()
1917 1921 fl = self.file(f)
1918 1922 o = len(fl)
1919 1923 if fl.addgroup(source, revmap, trp) is None:
1920 1924 raise util.Abort(_("received file revlog group is empty"))
1921 1925 revisions += len(fl) - o
1922 1926 files += 1
1923 1927 if f in needfiles:
1924 1928 needs = needfiles[f]
1925 1929 for new in xrange(o, len(fl)):
1926 1930 n = fl.node(new)
1927 1931 if n in needs:
1928 1932 needs.remove(n)
1929 1933 if not needs:
1930 1934 del needfiles[f]
1931 1935 self.ui.progress(_('files'), None)
1932 1936
1933 1937 for f, needs in needfiles.iteritems():
1934 1938 fl = self.file(f)
1935 1939 for n in needs:
1936 1940 try:
1937 1941 fl.rev(n)
1938 1942 except error.LookupError:
1939 1943 raise util.Abort(
1940 1944 _('missing file data for %s:%s - run hg verify') %
1941 1945 (f, hex(n)))
1942 1946
1943 1947 dh = 0
1944 1948 if oldheads:
1945 1949 heads = cl.heads()
1946 1950 dh = len(heads) - len(oldheads)
1947 1951 for h in heads:
1948 1952 if h not in oldheads and 'close' in self[h].extra():
1949 1953 dh -= 1
1950 1954 htext = ""
1951 1955 if dh:
1952 1956 htext = _(" (%+d heads)") % dh
1953 1957
1954 1958 self.ui.status(_("added %d changesets"
1955 1959 " with %d changes to %d files%s\n")
1956 1960 % (changesets, revisions, files, htext))
1957 1961
1958 1962 if changesets > 0:
1959 1963 p = lambda: cl.writepending() and self.root or ""
1960 1964 self.hook('pretxnchangegroup', throw=True,
1961 1965 node=hex(cl.node(clstart)), source=srctype,
1962 1966 url=url, pending=p)
1963 1967
1964 1968 # make changelog see real files again
1965 1969 cl.finalize(trp)
1966 1970
1967 1971 tr.close()
1968 1972 finally:
1969 1973 tr.release()
1970 1974 if lock:
1971 1975 lock.release()
1972 1976
1973 1977 if changesets > 0:
1974 1978 # forcefully update the on-disk branch cache
1975 1979 self.ui.debug("updating the branch cache\n")
1976 1980 self.updatebranchcache()
1977 1981 self.hook("changegroup", node=hex(cl.node(clstart)),
1978 1982 source=srctype, url=url)
1979 1983
1980 1984 for i in xrange(clstart, clend):
1981 1985 self.hook("incoming", node=hex(cl.node(i)),
1982 1986 source=srctype, url=url)
1983 1987
1984 1988 # never return 0 here:
1985 1989 if dh < 0:
1986 1990 return dh - 1
1987 1991 else:
1988 1992 return dh + 1
1989 1993
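Because addchangegroup() shifts the head delta away from zero (per its docstring and the "never return 0 here" branch above), a caller can invert the encoding; a hypothetical helper:

    def headcount_delta(ret):
        # Inverse of addchangegroup()'s encoding: ret = dh + 1 when
        # dh >= 0 and ret = dh - 1 when dh < 0, so 0 never occurs
        # and the sign of the head-count change survives.
        assert ret != 0
        return ret - 1 if ret > 0 else ret + 1
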
1990 1994 def stream_in(self, remote, requirements):
1991 1995 lock = self.lock()
1992 1996 try:
1993 1997 fp = remote.stream_out()
1994 1998 l = fp.readline()
1995 1999 try:
1996 2000 resp = int(l)
1997 2001 except ValueError:
1998 2002 raise error.ResponseError(
1999 2003 _('Unexpected response from remote server:'), l)
2000 2004 if resp == 1:
2001 2005 raise util.Abort(_('operation forbidden by server'))
2002 2006 elif resp == 2:
2003 2007 raise util.Abort(_('locking the remote repository failed'))
2004 2008 elif resp != 0:
2005 2009 raise util.Abort(_('the server sent an unknown error code'))
2006 2010 self.ui.status(_('streaming all changes\n'))
2007 2011 l = fp.readline()
2008 2012 try:
2009 2013 total_files, total_bytes = map(int, l.split(' ', 1))
2010 2014 except (ValueError, TypeError):
2011 2015 raise error.ResponseError(
2012 2016 _('Unexpected response from remote server:'), l)
2013 2017 self.ui.status(_('%d files to transfer, %s of data\n') %
2014 2018 (total_files, util.bytecount(total_bytes)))
2015 2019 start = time.time()
2016 2020 for i in xrange(total_files):
2017 2021 # XXX doesn't support '\n' or '\r' in filenames
2018 2022 l = fp.readline()
2019 2023 try:
2020 2024 name, size = l.split('\0', 1)
2021 2025 size = int(size)
2022 2026 except (ValueError, TypeError):
2023 2027 raise error.ResponseError(
2024 2028 _('Unexpected response from remote server:'), l)
2025 2029 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2026 2030 # for backwards compat, name was partially encoded
2027 2031 ofp = self.sopener(store.decodedir(name), 'w')
2028 2032 for chunk in util.filechunkiter(fp, limit=size):
2029 2033 ofp.write(chunk)
2030 2034 ofp.close()
2031 2035 elapsed = time.time() - start
2032 2036 if elapsed <= 0:
2033 2037 elapsed = 0.001
2034 2038 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2035 2039 (util.bytecount(total_bytes), elapsed,
2036 2040 util.bytecount(total_bytes / elapsed)))
2037 2041
2038 2042 # new requirements = old non-format requirements + new format-related
2039 2043 # requirements from the streamed-in repository
2040 2044 requirements.update(set(self.requirements) - self.supportedformats)
2041 2045 self._applyrequirements(requirements)
2042 2046 self._writerequirements()
2043 2047
2044 2048 self.invalidate()
2045 2049 return len(self.heads()) + 1
2046 2050 finally:
2047 2051 lock.release()
2048 2052
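The stream that stream_in() consumes above is line-framed: a status line, a "<files> <bytes>" summary line, then a name/size header per file followed by exactly size raw bytes. An illustrative payload (file names, sizes, and contents made up):

    # Illustrative byte stream matching the parser in stream_in():
    #   status line: 0 = OK, 1 = forbidden, 2 = remote lock failed
    #   summary line: "<total files> <total bytes>"
    #   per file: name, NUL, size, newline, then <size> raw bytes
    payload = ("0\n"
               "2 11\n"
               "data/a.i\x006\n" + "x" * 6 +
               "data/b.i\x005\n" + "x" * 5)
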
2049 2053 def clone(self, remote, heads=[], stream=False):
2050 2054 '''clone remote repository.
2051 2055
2052 2056 keyword arguments:
2053 2057 heads: list of revs to clone (forces use of pull)
2054 2058 stream: use streaming clone if possible'''
2055 2059
2056 2060 # now, all clients that can request uncompressed clones can
2057 2061 # read repo formats supported by all servers that can serve
2058 2062 # them.
2059 2063
2060 2064 # if revlog format changes, client will have to check version
2061 2065 # and format flags on "stream" capability, and use
2062 2066 # uncompressed only if compatible.
2063 2067
2064 2068 if stream and not heads:
2065 2069 # 'stream' means remote revlog format is revlogv1 only
2066 2070 if remote.capable('stream'):
2067 2071 return self.stream_in(remote, set(('revlogv1',)))
2068 2072 # otherwise, 'streamreqs' contains the remote revlog format
2069 2073 streamreqs = remote.capable('streamreqs')
2070 2074 if streamreqs:
2071 2075 streamreqs = set(streamreqs.split(','))
2072 2076 # if we support it, stream in and adjust our requirements
2073 2077 if not streamreqs - self.supportedformats:
2074 2078 return self.stream_in(remote, streamreqs)
2075 2079 return self.pull(remote, heads)
2076 2080
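The streamreqs negotiation in clone() above is a subset test on comma-separated requirement names; for instance (values illustrative, with supportedformats as defined at the top of this class):

    # streamreqs negotiation, as clone() performs it:
    supportedformats = set(('revlogv1', 'generaldelta'))
    streamreqs = set('revlogv1,generaldelta'.split(','))
    can_stream = not (streamreqs - supportedformats)  # True here
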
2077 2081 def pushkey(self, namespace, key, old, new):
2078 2082 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2079 2083 old=old, new=new)
2080 2084 ret = pushkey.push(self, namespace, key, old, new)
2081 2085 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2082 2086 ret=ret)
2083 2087 return ret
2084 2088
2085 2089 def listkeys(self, namespace):
2086 2090 self.hook('prelistkeys', throw=True, namespace=namespace)
2087 2091 values = pushkey.list(self, namespace)
2088 2092 self.hook('listkeys', namespace=namespace, values=values)
2089 2093 return values
2090 2094
2091 2095 def debugwireargs(self, one, two, three=None, four=None, five=None):
2092 2096 '''used to test argument passing over the wire'''
2093 2097 return "%s %s %s %s %s" % (one, two, three, four, five)
2094 2098
2095 2099 def savecommitmessage(self, text):
2096 2100 fp = self.opener('last-message.txt', 'wb')
2097 2101 try:
2098 2102 fp.write(text)
2099 2103 finally:
2100 2104 fp.close()
2101 2105 return self.pathto(fp.name[len(self.root)+1:])
2102 2106
2103 2107 # used to avoid circular references so destructors work
2104 2108 def aftertrans(files):
2105 2109 renamefiles = [tuple(t) for t in files]
2106 2110 def a():
2107 2111 for src, dest in renamefiles:
2108 2112 util.rename(src, dest)
2109 2113 return a
2110 2114
2111 2115 def undoname(fn):
2112 2116 base, name = os.path.split(fn)
2113 2117 assert name.startswith('journal')
2114 2118 return os.path.join(base, name.replace('journal', 'undo', 1))
2115 2119
2116 2120 def instance(ui, path, create):
2117 2121 return localrepository(ui, util.urllocalpath(path), create)
2118 2122
2119 2123 def islocal(path):
2120 2124 return True
@@ -1,41 +1,66
1 1 # Mercurial phases support code
2 2 #
3 3 # Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 # Augie Fackler <durin42@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 import errno
11 11 from node import nullid, bin, hex
12 12
13 13 allphases = range(2)
14 14 trackedphases = allphases[1:]
15 15
16 16 def readroots(repo):
17 17 """Read phase roots from disk"""
18 18 roots = [set() for i in allphases]
19 19 roots[0].add(nullid)
20 20 try:
21 21 f = repo.sopener('phaseroots')
22 22 try:
23 23 for line in f:
24 24 phase, nh = line.strip().split()
25 25 roots[int(phase)].add(bin(nh))
26 26 finally:
27 27 f.close()
28 28 except IOError, inst:
29 29 if inst.errno != errno.ENOENT:
30 30 raise
31 31 return roots
32 32
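readroots() above parses one "<phase> <hex node>" record per line; a hedged illustration of the on-disk shape (the hash is invented):

    # Illustrative 'phaseroots' content: one root in phase 1, the only
    # tracked phase here (trackedphases = allphases[1:]).
    sample = "1 1234567890abcdef1234567890abcdef12345678\n"
    phase, nh = sample.strip().split()
    # readroots() then does: roots[int(phase)].add(bin(nh))
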
33 33 def writeroots(repo):
34 34 """Write phase roots from disk"""
35 35 f = repo.sopener('phaseroots', 'w', atomictemp=True)
36 36 try:
37 37 for phase, roots in enumerate(repo._phaseroots):
38 38 for h in roots:
39 39 f.write('%i %s\n' % (phase, hex(h)))
40 repo._dirtyphases = False
40 41 finally:
41 42 f.close()
43
44 def moveboundary(repo, target_phase, nodes):
45 """Add nodes to a phase changing other nodes phases if necessary.
46
47 Simplify boundary to contains phase roots only."""
48
49 # move roots of lower states
50 for phase in xrange(target_phase + 1, len(allphases)):
51 # filter nodes that are not in a compatible phase already
52 # XXX rev phase cache might have been invalidated by a previous loop
53 # XXX we need to be smarter here
54 nodes = [n for n in nodes if repo[n].phase() >= phase]
55 if not nodes:
56 break # no roots to move anymore
57 roots = repo._phaseroots[phase]
58 olds = roots.copy()
59 ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
60 roots.clear()
61 roots.update(ctx.node() for ctx in ctxs)
62 if olds != roots:
63 # invalidate cache (we probably could be smarter here)
64 if '_phaserev' in vars(repo):
65 del repo._phaserev
66 repo._dirtyphases = True
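A hedged usage sketch of the new function (the caller name, the node list, and the publish-after-push policy are assumptions, not part of this changeset):

    from mercurial import phases

    def publishnodes(repo, nodes):
        # Hypothetical caller: lower 'nodes' (binary node ids) to
        # phase 0. moveboundary() re-roots every higher phase and
        # sets repo._dirtyphases when anything actually moved.
        phases.moveboundary(repo, 0, nodes)
        if repo._dirtyphases:
            phases.writeroots(repo)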