phases: call filterunknown() in readroots()...
Patrick Mezard
r16624:3f85cef6 default
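
This change moves the filterunknown() call out of localrepo._phaseroots and
into phases.readroots() itself, so unknown phase roots are filtered in a
single place. A minimal sketch of the effect, assuming the phases.py side of
the change (which this diff does not show):

    # Before: each reader of the phase roots had to filter separately.
    phaseroots = phases.readroots(repo)
    phases.filterunknown(repo, phaseroots)  # drop roots unknown to the changelog

    # After: readroots() filters internally, so _phaseroots (the hunk at
    # old line 188 below) can simply return its result.
    phaseroots = phases.readroots(repo)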
@@ -1,2349 +1,2348 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class storecache(filecache):
23 23 """filecache for files in the store"""
24 24 def join(self, obj, fname):
25 25 return obj.sjoin(fname)
26 26
27 27 class localrepository(repo.repository):
28 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 29 'known', 'getbundle'))
30 30 supportedformats = set(('revlogv1', 'generaldelta'))
31 31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 32 'dotencode'))
33 33
34 34 def __init__(self, baseui, path=None, create=False):
35 35 repo.repository.__init__(self)
36 36 self.root = os.path.realpath(util.expandpath(path))
37 37 self.path = os.path.join(self.root, ".hg")
38 38 self.origroot = path
39 39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 40 self.opener = scmutil.opener(self.path)
41 41 self.wopener = scmutil.opener(self.root)
42 42 self.baseui = baseui
43 43 self.ui = baseui.copy()
44 44 self._dirtyphases = False
45 45 # A list of callbacks to shape the phases if no data were found.
46 46 # Callbacks are in the form: func(repo, roots) --> processed roots.
47 47 # This list is to be filled by extensions during repo setup.
48 48 self._phasedefaults = []
49 49
50 50 try:
51 51 self.ui.readconfig(self.join("hgrc"), self.root)
52 52 extensions.loadall(self.ui)
53 53 except IOError:
54 54 pass
55 55
56 56 if not os.path.isdir(self.path):
57 57 if create:
58 58 if not os.path.exists(path):
59 59 util.makedirs(path)
60 60 util.makedir(self.path, notindexed=True)
61 61 requirements = ["revlogv1"]
62 62 if self.ui.configbool('format', 'usestore', True):
63 63 os.mkdir(os.path.join(self.path, "store"))
64 64 requirements.append("store")
65 65 if self.ui.configbool('format', 'usefncache', True):
66 66 requirements.append("fncache")
67 67 if self.ui.configbool('format', 'dotencode', True):
68 68 requirements.append('dotencode')
69 69 # create an invalid changelog
70 70 self.opener.append(
71 71 "00changelog.i",
72 72 '\0\0\0\2' # represents revlogv2
73 73 ' dummy changelog to prevent using the old repo layout'
74 74 )
75 75 if self.ui.configbool('format', 'generaldelta', False):
76 76 requirements.append("generaldelta")
77 77 requirements = set(requirements)
78 78 else:
79 79 raise error.RepoError(_("repository %s not found") % path)
80 80 elif create:
81 81 raise error.RepoError(_("repository %s already exists") % path)
82 82 else:
83 83 try:
84 84 requirements = scmutil.readrequires(self.opener, self.supported)
85 85 except IOError, inst:
86 86 if inst.errno != errno.ENOENT:
87 87 raise
88 88 requirements = set()
89 89
90 90 self.sharedpath = self.path
91 91 try:
92 92 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
93 93 if not os.path.exists(s):
94 94 raise error.RepoError(
95 95 _('.hg/sharedpath points to nonexistent directory %s') % s)
96 96 self.sharedpath = s
97 97 except IOError, inst:
98 98 if inst.errno != errno.ENOENT:
99 99 raise
100 100
101 101 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
102 102 self.spath = self.store.path
103 103 self.sopener = self.store.opener
104 104 self.sjoin = self.store.join
105 105 self.opener.createmode = self.store.createmode
106 106 self._applyrequirements(requirements)
107 107 if create:
108 108 self._writerequirements()
109 109
110 110
111 111 self._branchcache = None
112 112 self._branchcachetip = None
113 113 self.filterpats = {}
114 114 self._datafilters = {}
115 115 self._transref = self._lockref = self._wlockref = None
116 116
117 117 # A cache for various files under .hg/ that tracks file changes,
118 118 # (used by the filecache decorator)
119 119 #
120 120 # Maps a property name to its util.filecacheentry
121 121 self._filecache = {}
122 122
123 123 def _applyrequirements(self, requirements):
124 124 self.requirements = requirements
125 125 openerreqs = set(('revlogv1', 'generaldelta'))
126 126 self.sopener.options = dict((r, 1) for r in requirements
127 127 if r in openerreqs)
128 128
129 129 def _writerequirements(self):
130 130 reqfile = self.opener("requires", "w")
131 131 for r in self.requirements:
132 132 reqfile.write("%s\n" % r)
133 133 reqfile.close()
134 134
135 135 def _checknested(self, path):
136 136 """Determine if path is a legal nested repository."""
137 137 if not path.startswith(self.root):
138 138 return False
139 139 subpath = path[len(self.root) + 1:]
140 140 normsubpath = util.pconvert(subpath)
141 141
142 142 # XXX: Checking against the current working copy is wrong in
143 143 # the sense that it can reject things like
144 144 #
145 145 # $ hg cat -r 10 sub/x.txt
146 146 #
147 147 # if sub/ is no longer a subrepository in the working copy
148 148 # parent revision.
149 149 #
150 150 # However, it can of course also allow things that would have
151 151 # been rejected before, such as the above cat command if sub/
152 152 # is a subrepository now, but was a normal directory before.
153 153 # The old path auditor would have rejected by mistake since it
154 154 # panics when it sees sub/.hg/.
155 155 #
156 156 # All in all, checking against the working copy seems sensible
157 157 # since we want to prevent access to nested repositories on
158 158 # the filesystem *now*.
159 159 ctx = self[None]
160 160 parts = util.splitpath(subpath)
161 161 while parts:
162 162 prefix = '/'.join(parts)
163 163 if prefix in ctx.substate:
164 164 if prefix == normsubpath:
165 165 return True
166 166 else:
167 167 sub = ctx.sub(prefix)
168 168 return sub.checknested(subpath[len(prefix) + 1:])
169 169 else:
170 170 parts.pop()
171 171 return False
172 172
173 173 @filecache('bookmarks')
174 174 def _bookmarks(self):
175 175 return bookmarks.read(self)
176 176
177 177 @filecache('bookmarks.current')
178 178 def _bookmarkcurrent(self):
179 179 return bookmarks.readcurrent(self)
180 180
181 181 def _writebookmarks(self, marks):
182 182 bookmarks.write(self)
183 183
184 184 @storecache('phaseroots')
185 185 def _phaseroots(self):
186 186 self._dirtyphases = False
187 187 phaseroots = phases.readroots(self)
188 phases.filterunknown(self, phaseroots)
189 188 return phaseroots
190 189
191 190 @propertycache
192 191 def _phaserev(self):
193 192 cache = [phases.public] * len(self)
194 193 for phase in phases.trackedphases:
195 194 roots = map(self.changelog.rev, self._phaseroots[phase])
196 195 if roots:
197 196 for rev in roots:
198 197 cache[rev] = phase
199 198 for rev in self.changelog.descendants(*roots):
200 199 cache[rev] = phase
201 200 return cache
202 201
203 202 @storecache('00changelog.i')
204 203 def changelog(self):
205 204 c = changelog.changelog(self.sopener)
206 205 if 'HG_PENDING' in os.environ:
207 206 p = os.environ['HG_PENDING']
208 207 if p.startswith(self.root):
209 208 c.readpending('00changelog.i.a')
210 209 return c
211 210
212 211 @storecache('00manifest.i')
213 212 def manifest(self):
214 213 return manifest.manifest(self.sopener)
215 214
216 215 @filecache('dirstate')
217 216 def dirstate(self):
218 217 warned = [0]
219 218 def validate(node):
220 219 try:
221 220 self.changelog.rev(node)
222 221 return node
223 222 except error.LookupError:
224 223 if not warned[0]:
225 224 warned[0] = True
226 225 self.ui.warn(_("warning: ignoring unknown"
227 226 " working parent %s!\n") % short(node))
228 227 return nullid
229 228
230 229 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231 230
232 231 def __getitem__(self, changeid):
233 232 if changeid is None:
234 233 return context.workingctx(self)
235 234 return context.changectx(self, changeid)
236 235
237 236 def __contains__(self, changeid):
238 237 try:
239 238 return bool(self.lookup(changeid))
240 239 except error.RepoLookupError:
241 240 return False
242 241
243 242 def __nonzero__(self):
244 243 return True
245 244
246 245 def __len__(self):
247 246 return len(self.changelog)
248 247
249 248 def __iter__(self):
250 249 for i in xrange(len(self)):
251 250 yield i
252 251
253 252 def revs(self, expr, *args):
254 253 '''Return a list of revisions matching the given revset'''
255 254 expr = revset.formatspec(expr, *args)
256 255 m = revset.match(None, expr)
257 256 return [r for r in m(self, range(len(self)))]
258 257
259 258 def set(self, expr, *args):
260 259 '''
261 260 Yield a context for each matching revision, after doing arg
262 261 replacement via revset.formatspec
263 262 '''
264 263 for r in self.revs(expr, *args):
265 264 yield self[r]
266 265
267 266 def url(self):
268 267 return 'file:' + self.root
269 268
270 269 def hook(self, name, throw=False, **args):
271 270 return hook.hook(self.ui, self, name, throw, **args)
272 271
273 272 tag_disallowed = ':\r\n'
274 273
275 274 def _tag(self, names, node, message, local, user, date, extra={}):
276 275 if isinstance(names, str):
277 276 allchars = names
278 277 names = (names,)
279 278 else:
280 279 allchars = ''.join(names)
281 280 for c in self.tag_disallowed:
282 281 if c in allchars:
283 282 raise util.Abort(_('%r cannot be used in a tag name') % c)
284 283
285 284 branches = self.branchmap()
286 285 for name in names:
287 286 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 287 local=local)
289 288 if name in branches:
290 289 self.ui.warn(_("warning: tag %s conflicts with existing"
291 290 " branch name\n") % name)
292 291
293 292 def writetags(fp, names, munge, prevtags):
294 293 fp.seek(0, 2)
295 294 if prevtags and prevtags[-1] != '\n':
296 295 fp.write('\n')
297 296 for name in names:
298 297 m = munge and munge(name) or name
299 298 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
300 299 old = self.tags().get(name, nullid)
301 300 fp.write('%s %s\n' % (hex(old), m))
302 301 fp.write('%s %s\n' % (hex(node), m))
303 302 fp.close()
304 303
305 304 prevtags = ''
306 305 if local:
307 306 try:
308 307 fp = self.opener('localtags', 'r+')
309 308 except IOError:
310 309 fp = self.opener('localtags', 'a')
311 310 else:
312 311 prevtags = fp.read()
313 312
314 313 # local tags are stored in the current charset
315 314 writetags(fp, names, None, prevtags)
316 315 for name in names:
317 316 self.hook('tag', node=hex(node), tag=name, local=local)
318 317 return
319 318
320 319 try:
321 320 fp = self.wfile('.hgtags', 'rb+')
322 321 except IOError, e:
323 322 if e.errno != errno.ENOENT:
324 323 raise
325 324 fp = self.wfile('.hgtags', 'ab')
326 325 else:
327 326 prevtags = fp.read()
328 327
329 328 # committed tags are stored in UTF-8
330 329 writetags(fp, names, encoding.fromlocal, prevtags)
331 330
332 331 fp.close()
333 332
334 333 self.invalidatecaches()
335 334
336 335 if '.hgtags' not in self.dirstate:
337 336 self[None].add(['.hgtags'])
338 337
339 338 m = matchmod.exact(self.root, '', ['.hgtags'])
340 339 tagnode = self.commit(message, user, date, extra=extra, match=m)
341 340
342 341 for name in names:
343 342 self.hook('tag', node=hex(node), tag=name, local=local)
344 343
345 344 return tagnode
346 345
347 346 def tag(self, names, node, message, local, user, date):
348 347 '''tag a revision with one or more symbolic names.
349 348
350 349 names is a list of strings or, when adding a single tag, names may be a
351 350 string.
352 351
353 352 if local is True, the tags are stored in a per-repository file.
354 353 otherwise, they are stored in the .hgtags file, and a new
355 354 changeset is committed with the change.
356 355
357 356 keyword arguments:
358 357
359 358 local: whether to store tags in non-version-controlled file
360 359 (default False)
361 360
362 361 message: commit message to use if committing
363 362
364 363 user: name of user to use if committing
365 364
366 365 date: date tuple to use if committing'''
367 366
368 367 if not local:
369 368 for x in self.status()[:5]:
370 369 if '.hgtags' in x:
371 370 raise util.Abort(_('working copy of .hgtags is changed '
372 371 '(please commit .hgtags manually)'))
373 372
374 373 self.tags() # instantiate the cache
375 374 self._tag(names, node, message, local, user, date)
376 375
377 376 @propertycache
378 377 def _tagscache(self):
379 378 '''Returns a tagscache object that contains various tag-related caches.'''
380 379
381 380 # This simplifies its cache management by having one decorated
382 381 # function (this one) and the rest simply fetch things from it.
383 382 class tagscache(object):
384 383 def __init__(self):
385 384 # These two define the set of tags for this repository. tags
386 385 # maps tag name to node; tagtypes maps tag name to 'global' or
387 386 # 'local'. (Global tags are defined by .hgtags across all
388 387 # heads, and local tags are defined in .hg/localtags.)
389 388 # They constitute the in-memory cache of tags.
390 389 self.tags = self.tagtypes = None
391 390
392 391 self.nodetagscache = self.tagslist = None
393 392
394 393 cache = tagscache()
395 394 cache.tags, cache.tagtypes = self._findtags()
396 395
397 396 return cache
398 397
399 398 def tags(self):
400 399 '''return a mapping of tag to node'''
401 400 t = {}
402 401 for k, v in self._tagscache.tags.iteritems():
403 402 try:
404 403 # ignore tags to unknown nodes
405 404 self.changelog.rev(v)
406 405 t[k] = v
407 406 except error.LookupError:
408 407 pass
409 408 return t
410 409
411 410 def _findtags(self):
412 411 '''Do the hard work of finding tags. Return a pair of dicts
413 412 (tags, tagtypes) where tags maps tag name to node, and tagtypes
414 413 maps tag name to a string like \'global\' or \'local\'.
415 414 Subclasses or extensions are free to add their own tags, but
416 415 should be aware that the returned dicts will be retained for the
417 416 duration of the localrepo object.'''
418 417
419 418 # XXX what tagtype should subclasses/extensions use? Currently
420 419 # mq and bookmarks add tags, but do not set the tagtype at all.
421 420 # Should each extension invent its own tag type? Should there
422 421 # be one tagtype for all such "virtual" tags? Or is the status
423 422 # quo fine?
424 423
425 424 alltags = {} # map tag name to (node, hist)
426 425 tagtypes = {}
427 426
428 427 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
429 428 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
430 429
431 430 # Build the return dicts. Have to re-encode tag names because
432 431 # the tags module always uses UTF-8 (in order not to lose info
433 432 # writing to the cache), but the rest of Mercurial wants them in
434 433 # local encoding.
435 434 tags = {}
436 435 for (name, (node, hist)) in alltags.iteritems():
437 436 if node != nullid:
438 437 tags[encoding.tolocal(name)] = node
439 438 tags['tip'] = self.changelog.tip()
440 439 tagtypes = dict([(encoding.tolocal(name), value)
441 440 for (name, value) in tagtypes.iteritems()])
442 441 return (tags, tagtypes)
443 442
444 443 def tagtype(self, tagname):
445 444 '''
446 445 return the type of the given tag. result can be:
447 446
448 447 'local' : a local tag
449 448 'global' : a global tag
450 449 None : tag does not exist
451 450 '''
452 451
453 452 return self._tagscache.tagtypes.get(tagname)
454 453
455 454 def tagslist(self):
456 455 '''return a list of tags ordered by revision'''
457 456 if not self._tagscache.tagslist:
458 457 l = []
459 458 for t, n in self.tags().iteritems():
460 459 r = self.changelog.rev(n)
461 460 l.append((r, t, n))
462 461 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
463 462
464 463 return self._tagscache.tagslist
465 464
466 465 def nodetags(self, node):
467 466 '''return the tags associated with a node'''
468 467 if not self._tagscache.nodetagscache:
469 468 nodetagscache = {}
470 469 for t, n in self._tagscache.tags.iteritems():
471 470 nodetagscache.setdefault(n, []).append(t)
472 471 for tags in nodetagscache.itervalues():
473 472 tags.sort()
474 473 self._tagscache.nodetagscache = nodetagscache
475 474 return self._tagscache.nodetagscache.get(node, [])
476 475
477 476 def nodebookmarks(self, node):
478 477 marks = []
479 478 for bookmark, n in self._bookmarks.iteritems():
480 479 if n == node:
481 480 marks.append(bookmark)
482 481 return sorted(marks)
483 482
484 483 def _branchtags(self, partial, lrev):
485 484 # TODO: rename this function?
486 485 tiprev = len(self) - 1
487 486 if lrev != tiprev:
488 487 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
489 488 self._updatebranchcache(partial, ctxgen)
490 489 self._writebranchcache(partial, self.changelog.tip(), tiprev)
491 490
492 491 return partial
493 492
494 493 def updatebranchcache(self):
495 494 tip = self.changelog.tip()
496 495 if self._branchcache is not None and self._branchcachetip == tip:
497 496 return
498 497
499 498 oldtip = self._branchcachetip
500 499 self._branchcachetip = tip
501 500 if oldtip is None or oldtip not in self.changelog.nodemap:
502 501 partial, last, lrev = self._readbranchcache()
503 502 else:
504 503 lrev = self.changelog.rev(oldtip)
505 504 partial = self._branchcache
506 505
507 506 self._branchtags(partial, lrev)
508 507 # this private cache holds all heads (not just tips)
509 508 self._branchcache = partial
510 509
511 510 def branchmap(self):
512 511 '''returns a dictionary {branch: [branchheads]}'''
513 512 self.updatebranchcache()
514 513 return self._branchcache
515 514
516 515 def branchtags(self):
517 516 '''return a dict where branch names map to the tipmost head of
518 517 the branch, open heads come before closed'''
519 518 bt = {}
520 519 for bn, heads in self.branchmap().iteritems():
521 520 tip = heads[-1]
522 521 for h in reversed(heads):
523 522 if 'close' not in self.changelog.read(h)[5]:
524 523 tip = h
525 524 break
526 525 bt[bn] = tip
527 526 return bt
528 527
529 528 def _readbranchcache(self):
530 529 partial = {}
531 530 try:
532 531 f = self.opener("cache/branchheads")
533 532 lines = f.read().split('\n')
534 533 f.close()
535 534 except (IOError, OSError):
536 535 return {}, nullid, nullrev
537 536
538 537 try:
539 538 last, lrev = lines.pop(0).split(" ", 1)
540 539 last, lrev = bin(last), int(lrev)
541 540 if lrev >= len(self) or self[lrev].node() != last:
542 541 # invalidate the cache
543 542 raise ValueError('invalidating branch cache (tip differs)')
544 543 for l in lines:
545 544 if not l:
546 545 continue
547 546 node, label = l.split(" ", 1)
548 547 label = encoding.tolocal(label.strip())
549 548 partial.setdefault(label, []).append(bin(node))
550 549 except KeyboardInterrupt:
551 550 raise
552 551 except Exception, inst:
553 552 if self.ui.debugflag:
554 553 self.ui.warn(str(inst), '\n')
555 554 partial, last, lrev = {}, nullid, nullrev
556 555 return partial, last, lrev
557 556
558 557 def _writebranchcache(self, branches, tip, tiprev):
559 558 try:
560 559 f = self.opener("cache/branchheads", "w", atomictemp=True)
561 560 f.write("%s %s\n" % (hex(tip), tiprev))
562 561 for label, nodes in branches.iteritems():
563 562 for node in nodes:
564 563 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
565 564 f.close()
566 565 except (IOError, OSError):
567 566 pass
568 567
569 568 def _updatebranchcache(self, partial, ctxgen):
570 569 # collect new branch entries
571 570 newbranches = {}
572 571 for c in ctxgen:
573 572 newbranches.setdefault(c.branch(), []).append(c.node())
574 573 # if older branchheads are reachable from new ones, they aren't
575 574 # really branchheads. Note checking parents is insufficient:
576 575 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
577 576 for branch, newnodes in newbranches.iteritems():
578 577 bheads = partial.setdefault(branch, [])
579 578 bheads.extend(newnodes)
580 579 if len(bheads) <= 1:
581 580 continue
582 581 bheads = sorted(bheads, key=lambda x: self[x].rev())
583 582 # starting from tip means fewer passes over reachable
584 583 while newnodes:
585 584 latest = newnodes.pop()
586 585 if latest not in bheads:
587 586 continue
588 587 minbhrev = self[bheads[0]].node()
589 588 reachable = self.changelog.reachable(latest, minbhrev)
590 589 reachable.remove(latest)
591 590 if reachable:
592 591 bheads = [b for b in bheads if b not in reachable]
593 592 partial[branch] = bheads
594 593
595 594 def lookup(self, key):
596 595 return self[key].node()
597 596
598 597 def lookupbranch(self, key, remote=None):
599 598 repo = remote or self
600 599 if key in repo.branchmap():
601 600 return key
602 601
603 602 repo = (remote and remote.local()) and remote or self
604 603 return repo[key].branch()
605 604
606 605 def known(self, nodes):
607 606 nm = self.changelog.nodemap
608 607 result = []
609 608 for n in nodes:
610 609 r = nm.get(n)
611 610 resp = not (r is None or self._phaserev[r] >= phases.secret)
612 611 result.append(resp)
613 612 return result
614 613
615 614 def local(self):
616 615 return self
617 616
618 617 def join(self, f):
619 618 return os.path.join(self.path, f)
620 619
621 620 def wjoin(self, f):
622 621 return os.path.join(self.root, f)
623 622
624 623 def file(self, f):
625 624 if f[0] == '/':
626 625 f = f[1:]
627 626 return filelog.filelog(self.sopener, f)
628 627
629 628 def changectx(self, changeid):
630 629 return self[changeid]
631 630
632 631 def parents(self, changeid=None):
633 632 '''get list of changectxs for parents of changeid'''
634 633 return self[changeid].parents()
635 634
636 635 def setparents(self, p1, p2=nullid):
637 636 copies = self.dirstate.setparents(p1, p2)
638 637 if copies:
639 638 # Adjust copy records, the dirstate cannot do it, it
640 639 # requires access to parents manifests. Preserve them
641 640 # only for entries added to first parent.
642 641 pctx = self[p1]
643 642 for f in copies:
644 643 if f not in pctx and copies[f] in pctx:
645 644 self.dirstate.copy(copies[f], f)
646 645
647 646 def filectx(self, path, changeid=None, fileid=None):
648 647 """changeid can be a changeset revision, node, or tag.
649 648 fileid can be a file revision or node."""
650 649 return context.filectx(self, path, changeid, fileid)
651 650
652 651 def getcwd(self):
653 652 return self.dirstate.getcwd()
654 653
655 654 def pathto(self, f, cwd=None):
656 655 return self.dirstate.pathto(f, cwd)
657 656
658 657 def wfile(self, f, mode='r'):
659 658 return self.wopener(f, mode)
660 659
661 660 def _link(self, f):
662 661 return os.path.islink(self.wjoin(f))
663 662
664 663 def _loadfilter(self, filter):
665 664 if filter not in self.filterpats:
666 665 l = []
667 666 for pat, cmd in self.ui.configitems(filter):
668 667 if cmd == '!':
669 668 continue
670 669 mf = matchmod.match(self.root, '', [pat])
671 670 fn = None
672 671 params = cmd
673 672 for name, filterfn in self._datafilters.iteritems():
674 673 if cmd.startswith(name):
675 674 fn = filterfn
676 675 params = cmd[len(name):].lstrip()
677 676 break
678 677 if not fn:
679 678 fn = lambda s, c, **kwargs: util.filter(s, c)
680 679 # Wrap old filters not supporting keyword arguments
681 680 if not inspect.getargspec(fn)[2]:
682 681 oldfn = fn
683 682 fn = lambda s, c, **kwargs: oldfn(s, c)
684 683 l.append((mf, fn, params))
685 684 self.filterpats[filter] = l
686 685 return self.filterpats[filter]
687 686
688 687 def _filter(self, filterpats, filename, data):
689 688 for mf, fn, cmd in filterpats:
690 689 if mf(filename):
691 690 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
692 691 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
693 692 break
694 693
695 694 return data
696 695
697 696 @propertycache
698 697 def _encodefilterpats(self):
699 698 return self._loadfilter('encode')
700 699
701 700 @propertycache
702 701 def _decodefilterpats(self):
703 702 return self._loadfilter('decode')
704 703
705 704 def adddatafilter(self, name, filter):
706 705 self._datafilters[name] = filter
707 706
708 707 def wread(self, filename):
709 708 if self._link(filename):
710 709 data = os.readlink(self.wjoin(filename))
711 710 else:
712 711 data = self.wopener.read(filename)
713 712 return self._filter(self._encodefilterpats, filename, data)
714 713
715 714 def wwrite(self, filename, data, flags):
716 715 data = self._filter(self._decodefilterpats, filename, data)
717 716 if 'l' in flags:
718 717 self.wopener.symlink(data, filename)
719 718 else:
720 719 self.wopener.write(filename, data)
721 720 if 'x' in flags:
722 721 util.setflags(self.wjoin(filename), False, True)
723 722
724 723 def wwritedata(self, filename, data):
725 724 return self._filter(self._decodefilterpats, filename, data)
726 725
727 726 def transaction(self, desc):
728 727 tr = self._transref and self._transref() or None
729 728 if tr and tr.running():
730 729 return tr.nest()
731 730
732 731 # abort here if the journal already exists
733 732 if os.path.exists(self.sjoin("journal")):
734 733 raise error.RepoError(
735 734 _("abandoned transaction found - run hg recover"))
736 735
737 736 self._writejournal(desc)
738 737 renames = [(x, undoname(x)) for x in self._journalfiles()]
739 738
740 739 tr = transaction.transaction(self.ui.warn, self.sopener,
741 740 self.sjoin("journal"),
742 741 aftertrans(renames),
743 742 self.store.createmode)
744 743 self._transref = weakref.ref(tr)
745 744 return tr
746 745
747 746 def _journalfiles(self):
748 747 return (self.sjoin('journal'), self.join('journal.dirstate'),
749 748 self.join('journal.branch'), self.join('journal.desc'),
750 749 self.join('journal.bookmarks'),
751 750 self.sjoin('journal.phaseroots'))
752 751
753 752 def undofiles(self):
754 753 return [undoname(x) for x in self._journalfiles()]
755 754
756 755 def _writejournal(self, desc):
757 756 self.opener.write("journal.dirstate",
758 757 self.opener.tryread("dirstate"))
759 758 self.opener.write("journal.branch",
760 759 encoding.fromlocal(self.dirstate.branch()))
761 760 self.opener.write("journal.desc",
762 761 "%d\n%s\n" % (len(self), desc))
763 762 self.opener.write("journal.bookmarks",
764 763 self.opener.tryread("bookmarks"))
765 764 self.sopener.write("journal.phaseroots",
766 765 self.sopener.tryread("phaseroots"))
767 766
768 767 def recover(self):
769 768 lock = self.lock()
770 769 try:
771 770 if os.path.exists(self.sjoin("journal")):
772 771 self.ui.status(_("rolling back interrupted transaction\n"))
773 772 transaction.rollback(self.sopener, self.sjoin("journal"),
774 773 self.ui.warn)
775 774 self.invalidate()
776 775 return True
777 776 else:
778 777 self.ui.warn(_("no interrupted transaction available\n"))
779 778 return False
780 779 finally:
781 780 lock.release()
782 781
783 782 def rollback(self, dryrun=False, force=False):
784 783 wlock = lock = None
785 784 try:
786 785 wlock = self.wlock()
787 786 lock = self.lock()
788 787 if os.path.exists(self.sjoin("undo")):
789 788 return self._rollback(dryrun, force)
790 789 else:
791 790 self.ui.warn(_("no rollback information available\n"))
792 791 return 1
793 792 finally:
794 793 release(lock, wlock)
795 794
796 795 def _rollback(self, dryrun, force):
797 796 ui = self.ui
798 797 try:
799 798 args = self.opener.read('undo.desc').splitlines()
800 799 (oldlen, desc, detail) = (int(args[0]), args[1], None)
801 800 if len(args) >= 3:
802 801 detail = args[2]
803 802 oldtip = oldlen - 1
804 803
805 804 if detail and ui.verbose:
806 805 msg = (_('repository tip rolled back to revision %s'
807 806 ' (undo %s: %s)\n')
808 807 % (oldtip, desc, detail))
809 808 else:
810 809 msg = (_('repository tip rolled back to revision %s'
811 810 ' (undo %s)\n')
812 811 % (oldtip, desc))
813 812 except IOError:
814 813 msg = _('rolling back unknown transaction\n')
815 814 desc = None
816 815
817 816 if not force and self['.'] != self['tip'] and desc == 'commit':
818 817 raise util.Abort(
819 818 _('rollback of last commit while not checked out '
820 819 'may lose data'), hint=_('use -f to force'))
821 820
822 821 ui.status(msg)
823 822 if dryrun:
824 823 return 0
825 824
826 825 parents = self.dirstate.parents()
827 826 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
828 827 if os.path.exists(self.join('undo.bookmarks')):
829 828 util.rename(self.join('undo.bookmarks'),
830 829 self.join('bookmarks'))
831 830 if os.path.exists(self.sjoin('undo.phaseroots')):
832 831 util.rename(self.sjoin('undo.phaseroots'),
833 832 self.sjoin('phaseroots'))
834 833 self.invalidate()
835 834
836 835 parentgone = (parents[0] not in self.changelog.nodemap or
837 836 parents[1] not in self.changelog.nodemap)
838 837 if parentgone:
839 838 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
840 839 try:
841 840 branch = self.opener.read('undo.branch')
842 841 self.dirstate.setbranch(branch)
843 842 except IOError:
844 843 ui.warn(_('named branch could not be reset: '
845 844 'current branch is still \'%s\'\n')
846 845 % self.dirstate.branch())
847 846
848 847 self.dirstate.invalidate()
849 848 parents = tuple([p.rev() for p in self.parents()])
850 849 if len(parents) > 1:
851 850 ui.status(_('working directory now based on '
852 851 'revisions %d and %d\n') % parents)
853 852 else:
854 853 ui.status(_('working directory now based on '
855 854 'revision %d\n') % parents)
856 855 self.destroyed()
857 856 return 0
858 857
859 858 def invalidatecaches(self):
860 859 def delcache(name):
861 860 try:
862 861 delattr(self, name)
863 862 except AttributeError:
864 863 pass
865 864
866 865 delcache('_tagscache')
867 866 delcache('_phaserev')
868 867
869 868 self._branchcache = None # in UTF-8
870 869 self._branchcachetip = None
871 870
872 871 def invalidatedirstate(self):
873 872 '''Invalidates the dirstate, causing the next call to dirstate
874 873 to check if it was modified since the last time it was read,
875 874 rereading it if it has.
876 875
877 876 This is different from dirstate.invalidate() in that it doesn't always
878 877 reread the dirstate. Use dirstate.invalidate() if you want to
879 878 explicitly read the dirstate again (i.e. restoring it to a previous
880 879 known good state).'''
881 880 if 'dirstate' in self.__dict__:
882 881 for k in self.dirstate._filecache:
883 882 try:
884 883 delattr(self.dirstate, k)
885 884 except AttributeError:
886 885 pass
887 886 delattr(self, 'dirstate')
888 887
889 888 def invalidate(self):
890 889 for k in self._filecache:
891 890 # dirstate is invalidated separately in invalidatedirstate()
892 891 if k == 'dirstate':
893 892 continue
894 893
895 894 try:
896 895 delattr(self, k)
897 896 except AttributeError:
898 897 pass
899 898 self.invalidatecaches()
900 899
901 900 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
902 901 try:
903 902 l = lock.lock(lockname, 0, releasefn, desc=desc)
904 903 except error.LockHeld, inst:
905 904 if not wait:
906 905 raise
907 906 self.ui.warn(_("waiting for lock on %s held by %r\n") %
908 907 (desc, inst.locker))
909 908 # default to 600 seconds timeout
910 909 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
911 910 releasefn, desc=desc)
912 911 if acquirefn:
913 912 acquirefn()
914 913 return l
915 914
916 915 def _afterlock(self, callback):
917 916 """add a callback to the current repository lock.
918 917
919 918 The callback will be executed on lock release."""
920 919 l = self._lockref and self._lockref()
921 920 if l:
922 921 l.postrelease.append(callback)
923 922
924 923 def lock(self, wait=True):
925 924 '''Lock the repository store (.hg/store) and return a weak reference
926 925 to the lock. Use this before modifying the store (e.g. committing or
927 926 stripping). If you are opening a transaction, get a lock as well.'''
928 927 l = self._lockref and self._lockref()
929 928 if l is not None and l.held:
930 929 l.lock()
931 930 return l
932 931
933 932 def unlock():
934 933 self.store.write()
935 934 if self._dirtyphases:
936 935 phases.writeroots(self)
937 936 self._dirtyphases = False
938 937 for k, ce in self._filecache.items():
939 938 if k == 'dirstate':
940 939 continue
941 940 ce.refresh()
942 941
943 942 l = self._lock(self.sjoin("lock"), wait, unlock,
944 943 self.invalidate, _('repository %s') % self.origroot)
945 944 self._lockref = weakref.ref(l)
946 945 return l
947 946
948 947 def wlock(self, wait=True):
949 948 '''Lock the non-store parts of the repository (everything under
950 949 .hg except .hg/store) and return a weak reference to the lock.
951 950 Use this before modifying files in .hg.'''
952 951 l = self._wlockref and self._wlockref()
953 952 if l is not None and l.held:
954 953 l.lock()
955 954 return l
956 955
957 956 def unlock():
958 957 self.dirstate.write()
959 958 ce = self._filecache.get('dirstate')
960 959 if ce:
961 960 ce.refresh()
962 961
963 962 l = self._lock(self.join("wlock"), wait, unlock,
964 963 self.invalidatedirstate, _('working directory of %s') %
965 964 self.origroot)
966 965 self._wlockref = weakref.ref(l)
967 966 return l
968 967
969 968 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
970 969 """
971 970 commit an individual file as part of a larger transaction
972 971 """
973 972
974 973 fname = fctx.path()
975 974 text = fctx.data()
976 975 flog = self.file(fname)
977 976 fparent1 = manifest1.get(fname, nullid)
978 977 fparent2 = fparent2o = manifest2.get(fname, nullid)
979 978
980 979 meta = {}
981 980 copy = fctx.renamed()
982 981 if copy and copy[0] != fname:
983 982 # Mark the new revision of this file as a copy of another
984 983 # file. This copy data will effectively act as a parent
985 984 # of this new revision. If this is a merge, the first
986 985 # parent will be the nullid (meaning "look up the copy data")
987 986 # and the second one will be the other parent. For example:
988 987 #
989 988 # 0 --- 1 --- 3 rev1 changes file foo
990 989 # \ / rev2 renames foo to bar and changes it
991 990 # \- 2 -/ rev3 should have bar with all changes and
992 991 # should record that bar descends from
993 992 # bar in rev2 and foo in rev1
994 993 #
995 994 # this allows this merge to succeed:
996 995 #
997 996 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
998 997 # \ / merging rev3 and rev4 should use bar@rev2
999 998 # \- 2 --- 4 as the merge base
1000 999 #
1001 1000
1002 1001 cfname = copy[0]
1003 1002 crev = manifest1.get(cfname)
1004 1003 newfparent = fparent2
1005 1004
1006 1005 if manifest2: # branch merge
1007 1006 if fparent2 == nullid or crev is None: # copied on remote side
1008 1007 if cfname in manifest2:
1009 1008 crev = manifest2[cfname]
1010 1009 newfparent = fparent1
1011 1010
1012 1011 # find source in nearest ancestor if we've lost track
1013 1012 if not crev:
1014 1013 self.ui.debug(" %s: searching for copy revision for %s\n" %
1015 1014 (fname, cfname))
1016 1015 for ancestor in self[None].ancestors():
1017 1016 if cfname in ancestor:
1018 1017 crev = ancestor[cfname].filenode()
1019 1018 break
1020 1019
1021 1020 if crev:
1022 1021 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1023 1022 meta["copy"] = cfname
1024 1023 meta["copyrev"] = hex(crev)
1025 1024 fparent1, fparent2 = nullid, newfparent
1026 1025 else:
1027 1026 self.ui.warn(_("warning: can't find ancestor for '%s' "
1028 1027 "copied from '%s'!\n") % (fname, cfname))
1029 1028
1030 1029 elif fparent2 != nullid:
1031 1030 # is one parent an ancestor of the other?
1032 1031 fparentancestor = flog.ancestor(fparent1, fparent2)
1033 1032 if fparentancestor == fparent1:
1034 1033 fparent1, fparent2 = fparent2, nullid
1035 1034 elif fparentancestor == fparent2:
1036 1035 fparent2 = nullid
1037 1036
1038 1037 # is the file changed?
1039 1038 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1040 1039 changelist.append(fname)
1041 1040 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1042 1041
1043 1042 # are just the flags changed during merge?
1044 1043 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1045 1044 changelist.append(fname)
1046 1045
1047 1046 return fparent1
1048 1047
1049 1048 def commit(self, text="", user=None, date=None, match=None, force=False,
1050 1049 editor=False, extra={}):
1051 1050 """Add a new revision to current repository.
1052 1051
1053 1052 Revision information is gathered from the working directory,
1054 1053 match can be used to filter the committed files. If editor is
1055 1054 supplied, it is called to get a commit message.
1056 1055 """
1057 1056
1058 1057 def fail(f, msg):
1059 1058 raise util.Abort('%s: %s' % (f, msg))
1060 1059
1061 1060 if not match:
1062 1061 match = matchmod.always(self.root, '')
1063 1062
1064 1063 if not force:
1065 1064 vdirs = []
1066 1065 match.dir = vdirs.append
1067 1066 match.bad = fail
1068 1067
1069 1068 wlock = self.wlock()
1070 1069 try:
1071 1070 wctx = self[None]
1072 1071 merge = len(wctx.parents()) > 1
1073 1072
1074 1073 if (not force and merge and match and
1075 1074 (match.files() or match.anypats())):
1076 1075 raise util.Abort(_('cannot partially commit a merge '
1077 1076 '(do not specify files or patterns)'))
1078 1077
1079 1078 changes = self.status(match=match, clean=force)
1080 1079 if force:
1081 1080 changes[0].extend(changes[6]) # mq may commit unchanged files
1082 1081
1083 1082 # check subrepos
1084 1083 subs = []
1085 1084 commitsubs = set()
1086 1085 newstate = wctx.substate.copy()
1087 1086 # only manage subrepos and .hgsubstate if .hgsub is present
1088 1087 if '.hgsub' in wctx:
1089 1088 # we'll decide whether to track this ourselves, thanks
1090 1089 if '.hgsubstate' in changes[0]:
1091 1090 changes[0].remove('.hgsubstate')
1092 1091 if '.hgsubstate' in changes[2]:
1093 1092 changes[2].remove('.hgsubstate')
1094 1093
1095 1094 # compare current state to last committed state
1096 1095 # build new substate based on last committed state
1097 1096 oldstate = wctx.p1().substate
1098 1097 for s in sorted(newstate.keys()):
1099 1098 if not match(s):
1100 1099 # ignore working copy, use old state if present
1101 1100 if s in oldstate:
1102 1101 newstate[s] = oldstate[s]
1103 1102 continue
1104 1103 if not force:
1105 1104 raise util.Abort(
1106 1105 _("commit with new subrepo %s excluded") % s)
1107 1106 if wctx.sub(s).dirty(True):
1108 1107 if not self.ui.configbool('ui', 'commitsubrepos'):
1109 1108 raise util.Abort(
1110 1109 _("uncommitted changes in subrepo %s") % s,
1111 1110 hint=_("use --subrepos for recursive commit"))
1112 1111 subs.append(s)
1113 1112 commitsubs.add(s)
1114 1113 else:
1115 1114 bs = wctx.sub(s).basestate()
1116 1115 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1117 1116 if oldstate.get(s, (None, None, None))[1] != bs:
1118 1117 subs.append(s)
1119 1118
1120 1119 # check for removed subrepos
1121 1120 for p in wctx.parents():
1122 1121 r = [s for s in p.substate if s not in newstate]
1123 1122 subs += [s for s in r if match(s)]
1124 1123 if subs:
1125 1124 if (not match('.hgsub') and
1126 1125 '.hgsub' in (wctx.modified() + wctx.added())):
1127 1126 raise util.Abort(
1128 1127 _("can't commit subrepos without .hgsub"))
1129 1128 changes[0].insert(0, '.hgsubstate')
1130 1129
1131 1130 elif '.hgsub' in changes[2]:
1132 1131 # clean up .hgsubstate when .hgsub is removed
1133 1132 if ('.hgsubstate' in wctx and
1134 1133 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1135 1134 changes[2].insert(0, '.hgsubstate')
1136 1135
1137 1136 # make sure all explicit patterns are matched
1138 1137 if not force and match.files():
1139 1138 matched = set(changes[0] + changes[1] + changes[2])
1140 1139
1141 1140 for f in match.files():
1142 1141 if f == '.' or f in matched or f in wctx.substate:
1143 1142 continue
1144 1143 if f in changes[3]: # missing
1145 1144 fail(f, _('file not found!'))
1146 1145 if f in vdirs: # visited directory
1147 1146 d = f + '/'
1148 1147 for mf in matched:
1149 1148 if mf.startswith(d):
1150 1149 break
1151 1150 else:
1152 1151 fail(f, _("no match under directory!"))
1153 1152 elif f not in self.dirstate:
1154 1153 fail(f, _("file not tracked!"))
1155 1154
1156 1155 if (not force and not extra.get("close") and not merge
1157 1156 and not (changes[0] or changes[1] or changes[2])
1158 1157 and wctx.branch() == wctx.p1().branch()):
1159 1158 return None
1160 1159
1161 1160 if merge and changes[3]:
1162 1161 raise util.Abort(_("cannot commit merge with missing files"))
1163 1162
1164 1163 ms = mergemod.mergestate(self)
1165 1164 for f in changes[0]:
1166 1165 if f in ms and ms[f] == 'u':
1167 1166 raise util.Abort(_("unresolved merge conflicts "
1168 1167 "(see hg help resolve)"))
1169 1168
1170 1169 cctx = context.workingctx(self, text, user, date, extra, changes)
1171 1170 if editor:
1172 1171 cctx._text = editor(self, cctx, subs)
1173 1172 edited = (text != cctx._text)
1174 1173
1175 1174 # commit subs and write new state
1176 1175 if subs:
1177 1176 for s in sorted(commitsubs):
1178 1177 sub = wctx.sub(s)
1179 1178 self.ui.status(_('committing subrepository %s\n') %
1180 1179 subrepo.subrelpath(sub))
1181 1180 sr = sub.commit(cctx._text, user, date)
1182 1181 newstate[s] = (newstate[s][0], sr)
1183 1182 subrepo.writestate(self, newstate)
1184 1183
1185 1184 # Save commit message in case this transaction gets rolled back
1186 1185 # (e.g. by a pretxncommit hook). Leave the content alone on
1187 1186 # the assumption that the user will use the same editor again.
1188 1187 msgfn = self.savecommitmessage(cctx._text)
1189 1188
1190 1189 p1, p2 = self.dirstate.parents()
1191 1190 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1192 1191 try:
1193 1192 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1194 1193 ret = self.commitctx(cctx, True)
1195 1194 except:
1196 1195 if edited:
1197 1196 self.ui.write(
1198 1197 _('note: commit message saved in %s\n') % msgfn)
1199 1198 raise
1200 1199
1201 1200 # update bookmarks, dirstate and mergestate
1202 1201 bookmarks.update(self, p1, ret)
1203 1202 for f in changes[0] + changes[1]:
1204 1203 self.dirstate.normal(f)
1205 1204 for f in changes[2]:
1206 1205 self.dirstate.drop(f)
1207 1206 self.dirstate.setparents(ret)
1208 1207 ms.reset()
1209 1208 finally:
1210 1209 wlock.release()
1211 1210
1212 1211 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1213 1212 return ret
1214 1213
1215 1214 def commitctx(self, ctx, error=False):
1216 1215 """Add a new revision to current repository.
1217 1216 Revision information is passed via the context argument.
1218 1217 """
1219 1218
1220 1219 tr = lock = None
1221 1220 removed = list(ctx.removed())
1222 1221 p1, p2 = ctx.p1(), ctx.p2()
1223 1222 user = ctx.user()
1224 1223
1225 1224 lock = self.lock()
1226 1225 try:
1227 1226 tr = self.transaction("commit")
1228 1227 trp = weakref.proxy(tr)
1229 1228
1230 1229 if ctx.files():
1231 1230 m1 = p1.manifest().copy()
1232 1231 m2 = p2.manifest()
1233 1232
1234 1233 # check in files
1235 1234 new = {}
1236 1235 changed = []
1237 1236 linkrev = len(self)
1238 1237 for f in sorted(ctx.modified() + ctx.added()):
1239 1238 self.ui.note(f + "\n")
1240 1239 try:
1241 1240 fctx = ctx[f]
1242 1241 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1243 1242 changed)
1244 1243 m1.set(f, fctx.flags())
1245 1244 except OSError, inst:
1246 1245 self.ui.warn(_("trouble committing %s!\n") % f)
1247 1246 raise
1248 1247 except IOError, inst:
1249 1248 errcode = getattr(inst, 'errno', errno.ENOENT)
1250 1249 if error or errcode and errcode != errno.ENOENT:
1251 1250 self.ui.warn(_("trouble committing %s!\n") % f)
1252 1251 raise
1253 1252 else:
1254 1253 removed.append(f)
1255 1254
1256 1255 # update manifest
1257 1256 m1.update(new)
1258 1257 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1259 1258 drop = [f for f in removed if f in m1]
1260 1259 for f in drop:
1261 1260 del m1[f]
1262 1261 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1263 1262 p2.manifestnode(), (new, drop))
1264 1263 files = changed + removed
1265 1264 else:
1266 1265 mn = p1.manifestnode()
1267 1266 files = []
1268 1267
1269 1268 # update changelog
1270 1269 self.changelog.delayupdate()
1271 1270 n = self.changelog.add(mn, files, ctx.description(),
1272 1271 trp, p1.node(), p2.node(),
1273 1272 user, ctx.date(), ctx.extra().copy())
1274 1273 p = lambda: self.changelog.writepending() and self.root or ""
1275 1274 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1276 1275 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1277 1276 parent2=xp2, pending=p)
1278 1277 self.changelog.finalize(trp)
1279 1278 # set the new commit in its proper phase
1280 1279 targetphase = phases.newcommitphase(self.ui)
1281 1280 if targetphase:
1282 1281 # retract boundary does not alter parent changesets.
1283 1282 # if a parent has a higher phase, the resulting phase will
1284 1283 # be compliant anyway
1285 1284 #
1286 1285 # if minimal phase was 0 we don't need to retract anything
1287 1286 phases.retractboundary(self, targetphase, [n])
1288 1287 tr.close()
1289 1288 self.updatebranchcache()
1290 1289 return n
1291 1290 finally:
1292 1291 if tr:
1293 1292 tr.release()
1294 1293 lock.release()
1295 1294
1296 1295 def destroyed(self):
1297 1296 '''Inform the repository that nodes have been destroyed.
1298 1297 Intended for use by strip and rollback, so there's a common
1299 1298 place for anything that has to be done after destroying history.'''
1300 1299 # XXX it might be nice if we could take the list of destroyed
1301 1300 # nodes, but I don't see an easy way for rollback() to do that
1302 1301
1303 1302 # Ensure the persistent tag cache is updated. Doing it now
1304 1303 # means that the tag cache only has to worry about destroyed
1305 1304 # heads immediately after a strip/rollback. That in turn
1306 1305 # guarantees that "cachetip == currenttip" (comparing both rev
1307 1306 # and node) always means no nodes have been added or destroyed.
1308 1307
1309 1308 # XXX this is suboptimal when qrefresh'ing: we strip the current
1310 1309 # head, refresh the tag cache, then immediately add a new head.
1311 1310 # But I think doing it this way is necessary for the "instant
1312 1311 # tag cache retrieval" case to work.
1313 1312 self.invalidatecaches()
1314 1313
1315 1314 # Discard all cache entries to force reloading everything.
1316 1315 self._filecache.clear()
1317 1316
1318 1317 def walk(self, match, node=None):
1319 1318 '''
1320 1319 walk recursively through the directory tree or a given
1321 1320 changeset, finding all files matched by the match
1322 1321 function
1323 1322 '''
1324 1323 return self[node].walk(match)
1325 1324
1326 1325 def status(self, node1='.', node2=None, match=None,
1327 1326 ignored=False, clean=False, unknown=False,
1328 1327 listsubrepos=False):
1329 1328 """return status of files between two nodes or node and working directory
1330 1329
1331 1330 If node1 is None, use the first dirstate parent instead.
1332 1331 If node2 is None, compare node1 with working directory.
1333 1332 """
1334 1333
1335 1334 def mfmatches(ctx):
1336 1335 mf = ctx.manifest().copy()
1337 1336 for fn in mf.keys():
1338 1337 if not match(fn):
1339 1338 del mf[fn]
1340 1339 return mf
1341 1340
1342 1341 if isinstance(node1, context.changectx):
1343 1342 ctx1 = node1
1344 1343 else:
1345 1344 ctx1 = self[node1]
1346 1345 if isinstance(node2, context.changectx):
1347 1346 ctx2 = node2
1348 1347 else:
1349 1348 ctx2 = self[node2]
1350 1349
1351 1350 working = ctx2.rev() is None
1352 1351 parentworking = working and ctx1 == self['.']
1353 1352 match = match or matchmod.always(self.root, self.getcwd())
1354 1353 listignored, listclean, listunknown = ignored, clean, unknown
1355 1354
1356 1355 # load earliest manifest first for caching reasons
1357 1356 if not working and ctx2.rev() < ctx1.rev():
1358 1357 ctx2.manifest()
1359 1358
1360 1359 if not parentworking:
1361 1360 def bad(f, msg):
1362 1361 # 'f' may be a directory pattern from 'match.files()',
1363 1362 # so 'f not in ctx1' is not enough
1364 1363 if f not in ctx1 and f not in ctx1.dirs():
1365 1364 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1366 1365 match.bad = bad
1367 1366
1368 1367 if working: # we need to scan the working dir
1369 1368 subrepos = []
1370 1369 if '.hgsub' in self.dirstate:
1371 1370 subrepos = ctx2.substate.keys()
1372 1371 s = self.dirstate.status(match, subrepos, listignored,
1373 1372 listclean, listunknown)
1374 1373 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1375 1374
1376 1375 # check for any possibly clean files
1377 1376 if parentworking and cmp:
1378 1377 fixup = []
1379 1378 # do a full compare of any files that might have changed
1380 1379 for f in sorted(cmp):
1381 1380 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1382 1381 or ctx1[f].cmp(ctx2[f])):
1383 1382 modified.append(f)
1384 1383 else:
1385 1384 fixup.append(f)
1386 1385
1387 1386 # update dirstate for files that are actually clean
1388 1387 if fixup:
1389 1388 if listclean:
1390 1389 clean += fixup
1391 1390
1392 1391 try:
1393 1392 # updating the dirstate is optional
1394 1393 # so we don't wait on the lock
1395 1394 wlock = self.wlock(False)
1396 1395 try:
1397 1396 for f in fixup:
1398 1397 self.dirstate.normal(f)
1399 1398 finally:
1400 1399 wlock.release()
1401 1400 except error.LockError:
1402 1401 pass
1403 1402
1404 1403 if not parentworking:
1405 1404 mf1 = mfmatches(ctx1)
1406 1405 if working:
1407 1406 # we are comparing working dir against non-parent
1408 1407 # generate a pseudo-manifest for the working dir
1409 1408 mf2 = mfmatches(self['.'])
1410 1409 for f in cmp + modified + added:
1411 1410 mf2[f] = None
1412 1411 mf2.set(f, ctx2.flags(f))
1413 1412 for f in removed:
1414 1413 if f in mf2:
1415 1414 del mf2[f]
1416 1415 else:
1417 1416 # we are comparing two revisions
1418 1417 deleted, unknown, ignored = [], [], []
1419 1418 mf2 = mfmatches(ctx2)
1420 1419
1421 1420 modified, added, clean = [], [], []
1422 1421 for fn in mf2:
1423 1422 if fn in mf1:
1424 1423 if (fn not in deleted and
1425 1424 (mf1.flags(fn) != mf2.flags(fn) or
1426 1425 (mf1[fn] != mf2[fn] and
1427 1426 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1428 1427 modified.append(fn)
1429 1428 elif listclean:
1430 1429 clean.append(fn)
1431 1430 del mf1[fn]
1432 1431 elif fn not in deleted:
1433 1432 added.append(fn)
1434 1433 removed = mf1.keys()
1435 1434
1436 1435 if working and modified and not self.dirstate._checklink:
1437 1436 # Symlink placeholders may get non-symlink-like contents
1438 1437 # via user error or dereferencing by NFS or Samba servers,
1439 1438 # so we filter out any placeholders that don't look like a
1440 1439 # symlink
1441 1440 sane = []
1442 1441 for f in modified:
1443 1442 if ctx2.flags(f) == 'l':
1444 1443 d = ctx2[f].data()
1445 1444 if len(d) >= 1024 or '\n' in d or util.binary(d):
1446 1445 self.ui.debug('ignoring suspect symlink placeholder'
1447 1446 ' "%s"\n' % f)
1448 1447 continue
1449 1448 sane.append(f)
1450 1449 modified = sane
1451 1450
1452 1451 r = modified, added, removed, deleted, unknown, ignored, clean
1453 1452
1454 1453 if listsubrepos:
1455 1454 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1456 1455 if working:
1457 1456 rev2 = None
1458 1457 else:
1459 1458 rev2 = ctx2.substate[subpath][1]
1460 1459 try:
1461 1460 submatch = matchmod.narrowmatcher(subpath, match)
1462 1461 s = sub.status(rev2, match=submatch, ignored=listignored,
1463 1462 clean=listclean, unknown=listunknown,
1464 1463 listsubrepos=True)
1465 1464 for rfiles, sfiles in zip(r, s):
1466 1465 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1467 1466 except error.LookupError:
1468 1467 self.ui.status(_("skipping missing subrepository: %s\n")
1469 1468 % subpath)
1470 1469
1471 1470 for l in r:
1472 1471 l.sort()
1473 1472 return r
1474 1473
1475 1474 def heads(self, start=None):
1476 1475 heads = self.changelog.heads(start)
1477 1476 # sort the output in rev descending order
1478 1477 return sorted(heads, key=self.changelog.rev, reverse=True)
1479 1478
1480 1479 def branchheads(self, branch=None, start=None, closed=False):
1481 1480 '''return a (possibly filtered) list of heads for the given branch
1482 1481
1483 1482 Heads are returned in topological order, from newest to oldest.
1484 1483 If branch is None, use the dirstate branch.
1485 1484 If start is not None, return only heads reachable from start.
1486 1485 If closed is True, return heads that are marked as closed as well.
1487 1486 '''
1488 1487 if branch is None:
1489 1488 branch = self[None].branch()
1490 1489 branches = self.branchmap()
1491 1490 if branch not in branches:
1492 1491 return []
1493 1492 # the cache returns heads ordered lowest to highest
1494 1493 bheads = list(reversed(branches[branch]))
1495 1494 if start is not None:
1496 1495 # filter out the heads that cannot be reached from startrev
1497 1496 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1498 1497 bheads = [h for h in bheads if h in fbheads]
1499 1498 if not closed:
1500 1499 bheads = [h for h in bheads if
1501 1500 ('close' not in self.changelog.read(h)[5])]
1502 1501 return bheads
1503 1502
1504 1503 def branches(self, nodes):
1505 1504 if not nodes:
1506 1505 nodes = [self.changelog.tip()]
1507 1506 b = []
1508 1507 for n in nodes:
1509 1508 t = n
1510 1509 while True:
1511 1510 p = self.changelog.parents(n)
1512 1511 if p[1] != nullid or p[0] == nullid:
1513 1512 b.append((t, n, p[0], p[1]))
1514 1513 break
1515 1514 n = p[0]
1516 1515 return b
1517 1516
1518 1517 def between(self, pairs):
1519 1518 r = []
1520 1519
1521 1520 for top, bottom in pairs:
1522 1521 n, l, i = top, [], 0
1523 1522 f = 1
1524 1523
1525 1524 while n != bottom and n != nullid:
1526 1525 p = self.changelog.parents(n)[0]
1527 1526 if i == f:
1528 1527 l.append(n)
1529 1528 f = f * 2
1530 1529 n = p
1531 1530 i += 1
1532 1531
1533 1532 r.append(l)
1534 1533
1535 1534 return r
1536 1535
1537 1536 def pull(self, remote, heads=None, force=False):
1538 1537 lock = self.lock()
1539 1538 try:
1540 1539 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1541 1540 force=force)
1542 1541 common, fetch, rheads = tmp
1543 1542 if not fetch:
1544 1543 self.ui.status(_("no changes found\n"))
1545 1544 added = []
1546 1545 result = 0
1547 1546 else:
1548 1547 if heads is None and list(common) == [nullid]:
1549 1548 self.ui.status(_("requesting all changes\n"))
1550 1549 elif heads is None and remote.capable('changegroupsubset'):
1551 1550 # issue1320, avoid a race if remote changed after discovery
1552 1551 heads = rheads
1553 1552
1554 1553 if remote.capable('getbundle'):
1555 1554 cg = remote.getbundle('pull', common=common,
1556 1555 heads=heads or rheads)
1557 1556 elif heads is None:
1558 1557 cg = remote.changegroup(fetch, 'pull')
1559 1558 elif not remote.capable('changegroupsubset'):
1560 1559 raise util.Abort(_("partial pull cannot be done because "
1561 1560 "other repository doesn't support "
1562 1561 "changegroupsubset."))
1563 1562 else:
1564 1563 cg = remote.changegroupsubset(fetch, heads, 'pull')
1565 1564 clstart = len(self.changelog)
1566 1565 result = self.addchangegroup(cg, 'pull', remote.url())
1567 1566 clend = len(self.changelog)
1568 1567 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1569 1568
1570 1569 # compute target subset
1571 1570 if heads is None:
1572 1571 # We pulled everything possible
1573 1572 # sync on everything common
1574 1573 subset = common + added
1575 1574 else:
1576 1575 # We pulled a specific subset
1577 1576 # sync on this subset
1578 1577 subset = heads
1579 1578
1580 1579 # Get remote phases data from remote
1581 1580 remotephases = remote.listkeys('phases')
1582 1581 publishing = bool(remotephases.get('publishing', False))
1583 1582 if remotephases and not publishing:
1584 1583 # remote is new and non-publishing
1585 1584 pheads, _dr = phases.analyzeremotephases(self, subset,
1586 1585 remotephases)
1587 1586 phases.advanceboundary(self, phases.public, pheads)
1588 1587 phases.advanceboundary(self, phases.draft, subset)
1589 1588 else:
1590 1589 # Remote is old or publishing; all common changesets
1591 1590 # should be seen as public
1592 1591 phases.advanceboundary(self, phases.public, subset)
1593 1592 finally:
1594 1593 lock.release()
1595 1594
1596 1595 return result
1597 1596
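# Editorial note (summarizing the phase exchange above, no new behavior):
# after a pull, the phase boundary of 'subset' moves according to what
# the remote reports over the 'phases' pushkey namespace:
#
#     remote 'phases' listing      local effect on subset
#     -----------------------      ----------------------
#     empty (old server)           everything becomes public
#     'publishing' is True         everything becomes public
#     roots, non-publishing        remote public heads become public,
#                                  the rest advances to draft
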
1598 1597 def checkpush(self, force, revs):
1599 1598 """Extensions can override this function if additional checks have
1600 1599 to be performed before pushing, or call it if they override the push
1601 1600 command.
1602 1601 """
1603 1602 pass
1604 1603
1605 1604 def push(self, remote, force=False, revs=None, newbranch=False):
1606 1605 '''Push outgoing changesets (limited by revs) from the current
1607 1606 repository to remote. Return an integer (or None):
1608 1607 - None means nothing to push
1609 1608 - 0 means HTTP error
1610 1609 - 1 means we pushed and remote head count is unchanged *or*
1611 1610 we have outgoing changesets but refused to push
1612 1611 - other values as described by addchangegroup()
1613 1612 '''
1614 1613 # there are two ways to push to remote repo:
1615 1614 #
1616 1615 # addchangegroup assumes local user can lock remote
1617 1616 # repo (local filesystem, old ssh servers).
1618 1617 #
1619 1618 # unbundle assumes local user cannot lock remote repo (new ssh
1620 1619 # servers, http servers).
1621 1620
1622 1621 # get local lock as we might write phase data
1623 1622 locallock = self.lock()
1624 1623 try:
1625 1624 self.checkpush(force, revs)
1626 1625 lock = None
1627 1626 unbundle = remote.capable('unbundle')
1628 1627 if not unbundle:
1629 1628 lock = remote.lock()
1630 1629 try:
1631 1630 # discovery
1632 1631 fci = discovery.findcommonincoming
1633 1632 commoninc = fci(self, remote, force=force)
1634 1633 common, inc, remoteheads = commoninc
1635 1634 fco = discovery.findcommonoutgoing
1636 1635 outgoing = fco(self, remote, onlyheads=revs,
1637 1636 commoninc=commoninc, force=force)
1638 1637
1639 1638
1640 1639 if not outgoing.missing:
1641 1640 # nothing to push
1642 1641 scmutil.nochangesfound(self.ui, outgoing.excluded)
1643 1642 ret = None
1644 1643 else:
1645 1644 # something to push
1646 1645 if not force:
1647 1646 discovery.checkheads(self, remote, outgoing,
1648 1647 remoteheads, newbranch,
1649 1648 bool(inc))
1650 1649
1651 1650 # create a changegroup from local
1652 1651 if revs is None and not outgoing.excluded:
1653 1652 # push everything,
1654 1653 # use the fast path, no race possible on push
1655 1654 cg = self._changegroup(outgoing.missing, 'push')
1656 1655 else:
1657 1656 cg = self.getlocalbundle('push', outgoing)
1658 1657
1659 1658 # apply changegroup to remote
1660 1659 if unbundle:
1661 1660 # local repo finds heads on server, finds out what
1662 1661 # revs it must push. once revs transferred, if server
1663 1662 # finds it has different heads (someone else won
1664 1663 # commit/push race), server aborts.
1665 1664 if force:
1666 1665 remoteheads = ['force']
1667 1666 # ssh: return remote's addchangegroup()
1668 1667 # http: return remote's addchangegroup() or 0 for error
1669 1668 ret = remote.unbundle(cg, remoteheads, 'push')
1670 1669 else:
1671 1670 # we return an integer indicating remote head count change
1672 1671 ret = remote.addchangegroup(cg, 'push', self.url())
1673 1672
1674 1673 if ret:
1675 1674 # push succeeded, synchronize the target of the push
1676 1675 cheads = outgoing.missingheads
1677 1676 elif revs is None:
1678 1677 # The all-out push failed; synchronize all common
1679 1678 cheads = outgoing.commonheads
1680 1679 else:
1681 1680 # I want cheads = heads(::missingheads and ::commonheads)
1682 1681 # (missingheads is revs with secret changesets filtered out)
1683 1682 #
1684 1683 # This can be expressed as:
1685 1684 #     cheads = ( (missingheads and ::commonheads)
1686 1685 #              + (commonheads and ::missingheads)
1687 1686 #              )
1688 1687 #
1689 1688 # while trying to push we already computed the following:
1690 1689 # common = (::commonheads)
1691 1690 # missing = ((commonheads::missingheads) - commonheads)
1692 1691 #
1693 1692 # We can pick:
1694 1693 # * missingheads part of common (::commonheads)
1695 1694 common = set(outgoing.common)
1696 1695 cheads = [node for node in revs if node in common]
1697 1696 # and
1698 1697 # * parents of commonheads that lie in missing
1699 1698 revset = self.set('%ln and parents(roots(%ln))',
1700 1699 outgoing.commonheads,
1701 1700 outgoing.missing)
1702 1701 cheads.extend(c.node() for c in revset)
1703 1702 # even when we don't push, exchanging phase data is useful
1704 1703 remotephases = remote.listkeys('phases')
1705 1704 if not remotephases: # old server or public only repo
1706 1705 phases.advanceboundary(self, phases.public, cheads)
1707 1706 # don't push any phase data as there is nothing to push
1708 1707 else:
1709 1708 ana = phases.analyzeremotephases(self, cheads, remotephases)
1710 1709 pheads, droots = ana
1711 1710 ### Apply remote phase on local
1712 1711 if remotephases.get('publishing', False):
1713 1712 phases.advanceboundary(self, phases.public, cheads)
1714 1713 else: # publish = False
1715 1714 phases.advanceboundary(self, phases.public, pheads)
1716 1715 phases.advanceboundary(self, phases.draft, cheads)
1717 1716 ### Apply local phase on remote
1718 1717
1719 1718 # Get the list of all revs draft on remote but public here.
1720 1719 # XXX Beware that the revset breaks if droots is not strictly a
1721 1720 # XXX set of roots; we may want to ensure it is, but that is costly
1722 1721 outdated = self.set('heads((%ln::%ln) and public())',
1723 1722 droots, cheads)
1724 1723 for newremotehead in outdated:
1725 1724 r = remote.pushkey('phases',
1726 1725 newremotehead.hex(),
1727 1726 str(phases.draft),
1728 1727 str(phases.public))
1729 1728 if not r:
1730 1729 self.ui.warn(_('updating %s to public failed!\n')
1731 1730 % newremotehead)
1732 1731 finally:
1733 1732 if lock is not None:
1734 1733 lock.release()
1735 1734 finally:
1736 1735 locallock.release()
1737 1736
1738 1737 self.ui.debug("checking for updated bookmarks\n")
1739 1738 rb = remote.listkeys('bookmarks')
1740 1739 for k in rb.keys():
1741 1740 if k in self._bookmarks:
1742 1741 nr, nl = rb[k], hex(self._bookmarks[k])
1743 1742 if nr in self:
1744 1743 cr = self[nr]
1745 1744 cl = self[nl]
1746 1745 if cl in cr.descendants():
1747 1746 r = remote.pushkey('bookmarks', k, nr, nl)
1748 1747 if r:
1749 1748 self.ui.status(_("updating bookmark %s\n") % k)
1750 1749 else:
1751 1750 self.ui.warn(_('updating bookmark %s'
1752 1751 ' failed!\n') % k)
1753 1752
1754 1753 return ret
1755 1754
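# Editorial sketch (not part of this changeset): a tiny worked example of
# the fallback 'cheads' computation above (taken when a push of specific
# revs did not change the remote), on a linear history A-B-C-D where A
# and B are common, C and D are missing, and revs = [D]:
#
#     missing         = {C, D}
#     roots(missing)  = {C}   ->   parents(roots(missing)) = {B}
#     revs & common   = {}    (D is not in common)
#     cheads          = [B]   ==   heads(::D and ::B)
#
# so phase data is still exchanged for the common part that the push
# covered.
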
1756 1755 def changegroupinfo(self, nodes, source):
1757 1756 if self.ui.verbose or source == 'bundle':
1758 1757 self.ui.status(_("%d changesets found\n") % len(nodes))
1759 1758 if self.ui.debugflag:
1760 1759 self.ui.debug("list of changesets:\n")
1761 1760 for node in nodes:
1762 1761 self.ui.debug("%s\n" % hex(node))
1763 1762
1764 1763 def changegroupsubset(self, bases, heads, source):
1765 1764 """Compute a changegroup consisting of all the nodes that are
1766 1765 descendants of any of the bases and ancestors of any of the heads.
1767 1766 Return a chunkbuffer object whose read() method will return
1768 1767 successive changegroup chunks.
1769 1768
1770 1769 It is fairly complex as determining which filenodes and which
1771 1770 manifest nodes need to be included for the changeset to be complete
1772 1771 is non-trivial.
1773 1772
1774 1773 Another wrinkle is doing the reverse, figuring out which changeset in
1775 1774 the changegroup a particular filenode or manifestnode belongs to.
1776 1775 """
1777 1776 cl = self.changelog
1778 1777 if not bases:
1779 1778 bases = [nullid]
1780 1779 csets, bases, heads = cl.nodesbetween(bases, heads)
1781 1780 # We assume that all ancestors of bases are known
1782 1781 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1783 1782 return self._changegroupsubset(common, csets, heads, source)
1784 1783
1785 1784 def getlocalbundle(self, source, outgoing):
1786 1785 """Like getbundle, but taking a discovery.outgoing as an argument.
1787 1786
1788 1787 This is only implemented for local repos and reuses potentially
1789 1788 precomputed sets in outgoing."""
1790 1789 if not outgoing.missing:
1791 1790 return None
1792 1791 return self._changegroupsubset(outgoing.common,
1793 1792 outgoing.missing,
1794 1793 outgoing.missingheads,
1795 1794 source)
1796 1795
1797 1796 def getbundle(self, source, heads=None, common=None):
1798 1797 """Like changegroupsubset, but returns the set difference between the
1799 1798 ancestors of heads and the ancestors of common.
1800 1799
1801 1800 If heads is None, use the local heads. If common is None, use [nullid].
1802 1801
1803 1802 The nodes in common might not all be known locally due to the way the
1804 1803 current discovery protocol works.
1805 1804 """
1806 1805 cl = self.changelog
1807 1806 if common:
1808 1807 nm = cl.nodemap
1809 1808 common = [n for n in common if n in nm]
1810 1809 else:
1811 1810 common = [nullid]
1812 1811 if not heads:
1813 1812 heads = cl.heads()
1814 1813 return self.getlocalbundle(source,
1815 1814 discovery.outgoing(cl, common, heads))
1816 1815
1817 1816 def _changegroupsubset(self, commonrevs, csets, heads, source):
1818 1817
1819 1818 cl = self.changelog
1820 1819 mf = self.manifest
1821 1820 mfs = {} # needed manifests
1822 1821 fnodes = {} # needed file nodes
1823 1822 changedfiles = set()
1824 1823 fstate = ['', {}]
1825 1824 count = [0, 0]
1826 1825
1827 1826 # can we go through the fast path?
1828 1827 heads.sort()
1829 1828 if heads == sorted(self.heads()):
1830 1829 return self._changegroup(csets, source)
1831 1830
1832 1831 # slow path
1833 1832 self.hook('preoutgoing', throw=True, source=source)
1834 1833 self.changegroupinfo(csets, source)
1835 1834
1836 1835 # filter any nodes that claim to be part of the known set
1837 1836 def prune(revlog, missing):
1838 1837 rr, rl = revlog.rev, revlog.linkrev
1839 1838 return [n for n in missing
1840 1839 if rl(rr(n)) not in commonrevs]
1841 1840
1842 1841 progress = self.ui.progress
1843 1842 _bundling = _('bundling')
1844 1843 _changesets = _('changesets')
1845 1844 _manifests = _('manifests')
1846 1845 _files = _('files')
1847 1846
1848 1847 def lookup(revlog, x):
1849 1848 if revlog == cl:
1850 1849 c = cl.read(x)
1851 1850 changedfiles.update(c[3])
1852 1851 mfs.setdefault(c[0], x)
1853 1852 count[0] += 1
1854 1853 progress(_bundling, count[0],
1855 1854 unit=_changesets, total=count[1])
1856 1855 return x
1857 1856 elif revlog == mf:
1858 1857 clnode = mfs[x]
1859 1858 mdata = mf.readfast(x)
1860 1859 for f, n in mdata.iteritems():
1861 1860 if f in changedfiles:
1862 1861 fnodes[f].setdefault(n, clnode)
1863 1862 count[0] += 1
1864 1863 progress(_bundling, count[0],
1865 1864 unit=_manifests, total=count[1])
1866 1865 return clnode
1867 1866 else:
1868 1867 progress(_bundling, count[0], item=fstate[0],
1869 1868 unit=_files, total=count[1])
1870 1869 return fstate[1][x]
1871 1870
1872 1871 bundler = changegroup.bundle10(lookup)
1873 1872 reorder = self.ui.config('bundle', 'reorder', 'auto')
1874 1873 if reorder == 'auto':
1875 1874 reorder = None
1876 1875 else:
1877 1876 reorder = util.parsebool(reorder)
1878 1877
1879 1878 def gengroup():
1880 1879 # Create a changenode group generator that will call our functions
1881 1880 # back to look up the owning changenode and collect information.
1882 1881 count[:] = [0, len(csets)]
1883 1882 for chunk in cl.group(csets, bundler, reorder=reorder):
1884 1883 yield chunk
1885 1884 progress(_bundling, None)
1886 1885
1887 1886 # Create a generator for the manifestnodes that calls our lookup
1888 1887 # and data collection functions back.
1889 1888 for f in changedfiles:
1890 1889 fnodes[f] = {}
1891 1890 count[:] = [0, len(mfs)]
1892 1891 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1893 1892 yield chunk
1894 1893 progress(_bundling, None)
1895 1894
1896 1895 mfs.clear()
1897 1896
1898 1897 # Go through all our files in order sorted by name.
1899 1898 count[:] = [0, len(changedfiles)]
1900 1899 for fname in sorted(changedfiles):
1901 1900 filerevlog = self.file(fname)
1902 1901 if not len(filerevlog):
1903 1902 raise util.Abort(_("empty or missing revlog for %s") % fname)
1904 1903 fstate[0] = fname
1905 1904 fstate[1] = fnodes.pop(fname, {})
1906 1905
1907 1906 nodelist = prune(filerevlog, fstate[1])
1908 1907 if nodelist:
1909 1908 count[0] += 1
1910 1909 yield bundler.fileheader(fname)
1911 1910 for chunk in filerevlog.group(nodelist, bundler, reorder):
1912 1911 yield chunk
1913 1912
1914 1913 # Signal that no more groups are left.
1915 1914 yield bundler.close()
1916 1915 progress(_bundling, None)
1917 1916
1918 1917 if csets:
1919 1918 self.hook('outgoing', node=hex(csets[0]), source=source)
1920 1919
1921 1920 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1922 1921
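# Editorial sketch (inferred from gengroup() above, not a spec quote):
# the changegroup stream is a flat sequence of delta groups,
#
#     [changelog chunks] [manifest chunks]
#     [fileheader(name) + file chunks]    (once per changed file, sorted)
#     [bundler.close()]
#
# and the lookup() callback ties it together: each changelog chunk
# registers the changed files and manifest nodes, each manifest chunk
# registers the needed filenodes, and each file chunk resolves back to
# the changeset that introduced it. addchangegroup() below consumes
# exactly this shape.
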
1923 1922 def changegroup(self, basenodes, source):
1924 1923 # to avoid a race we use changegroupsubset() (issue1320)
1925 1924 return self.changegroupsubset(basenodes, self.heads(), source)
1926 1925
1927 1926 def _changegroup(self, nodes, source):
1928 1927 """Compute the changegroup of all nodes that we have that a recipient
1929 1928 doesn't. Return a chunkbuffer object whose read() method will return
1930 1929 successive changegroup chunks.
1931 1930
1932 1931 This is much easier than the previous function as we can assume that
1933 1932 the recipient has any changenode we aren't sending them.
1934 1933
1935 1934 nodes is the set of nodes to send"""
1936 1935
1937 1936 cl = self.changelog
1938 1937 mf = self.manifest
1939 1938 mfs = {}
1940 1939 changedfiles = set()
1941 1940 fstate = ['']
1942 1941 count = [0, 0]
1943 1942
1944 1943 self.hook('preoutgoing', throw=True, source=source)
1945 1944 self.changegroupinfo(nodes, source)
1946 1945
1947 1946 revset = set([cl.rev(n) for n in nodes])
1948 1947
1949 1948 def gennodelst(log):
1950 1949 ln, llr = log.node, log.linkrev
1951 1950 return [ln(r) for r in log if llr(r) in revset]
1952 1951
1953 1952 progress = self.ui.progress
1954 1953 _bundling = _('bundling')
1955 1954 _changesets = _('changesets')
1956 1955 _manifests = _('manifests')
1957 1956 _files = _('files')
1958 1957
1959 1958 def lookup(revlog, x):
1960 1959 if revlog == cl:
1961 1960 c = cl.read(x)
1962 1961 changedfiles.update(c[3])
1963 1962 mfs.setdefault(c[0], x)
1964 1963 count[0] += 1
1965 1964 progress(_bundling, count[0],
1966 1965 unit=_changesets, total=count[1])
1967 1966 return x
1968 1967 elif revlog == mf:
1969 1968 count[0] += 1
1970 1969 progress(_bundling, count[0],
1971 1970 unit=_manifests, total=count[1])
1972 1971 return cl.node(revlog.linkrev(revlog.rev(x)))
1973 1972 else:
1974 1973 progress(_bundling, count[0], item=fstate[0],
1975 1974 total=count[1], unit=_files)
1976 1975 return cl.node(revlog.linkrev(revlog.rev(x)))
1977 1976
1978 1977 bundler = changegroup.bundle10(lookup)
1979 1978 reorder = self.ui.config('bundle', 'reorder', 'auto')
1980 1979 if reorder == 'auto':
1981 1980 reorder = None
1982 1981 else:
1983 1982 reorder = util.parsebool(reorder)
1984 1983
1985 1984 def gengroup():
1986 1985 '''yield a sequence of changegroup chunks (strings)'''
1987 1986 # construct a list of all changed files
1988 1987
1989 1988 count[:] = [0, len(nodes)]
1990 1989 for chunk in cl.group(nodes, bundler, reorder=reorder):
1991 1990 yield chunk
1992 1991 progress(_bundling, None)
1993 1992
1994 1993 count[:] = [0, len(mfs)]
1995 1994 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1996 1995 yield chunk
1997 1996 progress(_bundling, None)
1998 1997
1999 1998 count[:] = [0, len(changedfiles)]
2000 1999 for fname in sorted(changedfiles):
2001 2000 filerevlog = self.file(fname)
2002 2001 if not len(filerevlog):
2003 2002 raise util.Abort(_("empty or missing revlog for %s") % fname)
2004 2003 fstate[0] = fname
2005 2004 nodelist = gennodelst(filerevlog)
2006 2005 if nodelist:
2007 2006 count[0] += 1
2008 2007 yield bundler.fileheader(fname)
2009 2008 for chunk in filerevlog.group(nodelist, bundler, reorder):
2010 2009 yield chunk
2011 2010 yield bundler.close()
2012 2011 progress(_bundling, None)
2013 2012
2014 2013 if nodes:
2015 2014 self.hook('outgoing', node=hex(nodes[0]), source=source)
2016 2015
2017 2016 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2018 2017
2019 2018 def addchangegroup(self, source, srctype, url, emptyok=False):
2020 2019 """Add the changegroup returned by source.read() to this repo.
2021 2020 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2022 2021 the URL of the repo where this changegroup is coming from.
2023 2022
2024 2023 Return an integer summarizing the change to this repo:
2025 2024 - nothing changed or no source: 0
2026 2025 - more heads than before: 1+added heads (2..n)
2027 2026 - fewer heads than before: -1-removed heads (-2..-n)
2028 2027 - number of heads stays the same: 1
2029 2028 """
2030 2029 def csmap(x):
2031 2030 self.ui.debug("add changeset %s\n" % short(x))
2032 2031 return len(cl)
2033 2032
2034 2033 def revmap(x):
2035 2034 return cl.rev(x)
2036 2035
2037 2036 if not source:
2038 2037 return 0
2039 2038
2040 2039 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2041 2040
2042 2041 changesets = files = revisions = 0
2043 2042 efiles = set()
2044 2043
2045 2044 # write changelog data to temp files so concurrent readers will not see
2046 2045 # an inconsistent view
2047 2046 cl = self.changelog
2048 2047 cl.delayupdate()
2049 2048 oldheads = cl.heads()
2050 2049
2051 2050 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2052 2051 try:
2053 2052 trp = weakref.proxy(tr)
2054 2053 # pull off the changeset group
2055 2054 self.ui.status(_("adding changesets\n"))
2056 2055 clstart = len(cl)
2057 2056 class prog(object):
2058 2057 step = _('changesets')
2059 2058 count = 1
2060 2059 ui = self.ui
2061 2060 total = None
2062 2061 def __call__(self):
2063 2062 self.ui.progress(self.step, self.count, unit=_('chunks'),
2064 2063 total=self.total)
2065 2064 self.count += 1
2066 2065 pr = prog()
2067 2066 source.callback = pr
2068 2067
2069 2068 source.changelogheader()
2070 2069 srccontent = cl.addgroup(source, csmap, trp)
2071 2070 if not (srccontent or emptyok):
2072 2071 raise util.Abort(_("received changelog group is empty"))
2073 2072 clend = len(cl)
2074 2073 changesets = clend - clstart
2075 2074 for c in xrange(clstart, clend):
2076 2075 efiles.update(self[c].files())
2077 2076 efiles = len(efiles)
2078 2077 self.ui.progress(_('changesets'), None)
2079 2078
2080 2079 # pull off the manifest group
2081 2080 self.ui.status(_("adding manifests\n"))
2082 2081 pr.step = _('manifests')
2083 2082 pr.count = 1
2084 2083 pr.total = changesets # manifests <= changesets
2085 2084 # no need to check for empty manifest group here:
2086 2085 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2087 2086 # no new manifest will be created and the manifest group will
2088 2087 # be empty during the pull
2089 2088 source.manifestheader()
2090 2089 self.manifest.addgroup(source, revmap, trp)
2091 2090 self.ui.progress(_('manifests'), None)
2092 2091
2093 2092 needfiles = {}
2094 2093 if self.ui.configbool('server', 'validate', default=False):
2095 2094 # validate incoming csets have their manifests
2096 2095 for cset in xrange(clstart, clend):
2097 2096 mfest = self.changelog.read(self.changelog.node(cset))[0]
2098 2097 mfest = self.manifest.readdelta(mfest)
2099 2098 # store file nodes we must see
2100 2099 for f, n in mfest.iteritems():
2101 2100 needfiles.setdefault(f, set()).add(n)
2102 2101
2103 2102 # process the files
2104 2103 self.ui.status(_("adding file changes\n"))
2105 2104 pr.step = _('files')
2106 2105 pr.count = 1
2107 2106 pr.total = efiles
2108 2107 source.callback = None
2109 2108
2110 2109 while True:
2111 2110 chunkdata = source.filelogheader()
2112 2111 if not chunkdata:
2113 2112 break
2114 2113 f = chunkdata["filename"]
2115 2114 self.ui.debug("adding %s revisions\n" % f)
2116 2115 pr()
2117 2116 fl = self.file(f)
2118 2117 o = len(fl)
2119 2118 if not fl.addgroup(source, revmap, trp):
2120 2119 raise util.Abort(_("received file revlog group is empty"))
2121 2120 revisions += len(fl) - o
2122 2121 files += 1
2123 2122 if f in needfiles:
2124 2123 needs = needfiles[f]
2125 2124 for new in xrange(o, len(fl)):
2126 2125 n = fl.node(new)
2127 2126 if n in needs:
2128 2127 needs.remove(n)
2129 2128 if not needs:
2130 2129 del needfiles[f]
2131 2130 self.ui.progress(_('files'), None)
2132 2131
2133 2132 for f, needs in needfiles.iteritems():
2134 2133 fl = self.file(f)
2135 2134 for n in needs:
2136 2135 try:
2137 2136 fl.rev(n)
2138 2137 except error.LookupError:
2139 2138 raise util.Abort(
2140 2139 _('missing file data for %s:%s - run hg verify') %
2141 2140 (f, hex(n)))
2142 2141
2143 2142 dh = 0
2144 2143 if oldheads:
2145 2144 heads = cl.heads()
2146 2145 dh = len(heads) - len(oldheads)
2147 2146 for h in heads:
2148 2147 if h not in oldheads and 'close' in self[h].extra():
2149 2148 dh -= 1
2150 2149 htext = ""
2151 2150 if dh:
2152 2151 htext = _(" (%+d heads)") % dh
2153 2152
2154 2153 self.ui.status(_("added %d changesets"
2155 2154 " with %d changes to %d files%s\n")
2156 2155 % (changesets, revisions, files, htext))
2157 2156
2158 2157 if changesets > 0:
2159 2158 p = lambda: cl.writepending() and self.root or ""
2160 2159 self.hook('pretxnchangegroup', throw=True,
2161 2160 node=hex(cl.node(clstart)), source=srctype,
2162 2161 url=url, pending=p)
2163 2162
2164 2163 added = [cl.node(r) for r in xrange(clstart, clend)]
2165 2164 publishing = self.ui.configbool('phases', 'publish', True)
2166 2165 if srctype == 'push':
2167 2166 # Old servers cannot push the boundary themselves.
2168 2167 # New servers won't push the boundary if the changeset already
2169 2168 # existed locally as secret
2170 2169 #
2171 2170 # We should not use 'added' here but the list of all changes in
2172 2171 # the bundle
2173 2172 if publishing:
2174 2173 phases.advanceboundary(self, phases.public, srccontent)
2175 2174 else:
2176 2175 phases.advanceboundary(self, phases.draft, srccontent)
2177 2176 phases.retractboundary(self, phases.draft, added)
2178 2177 elif srctype != 'strip':
2179 2178 # publishing only alters behavior during push
2180 2179 #
2181 2180 # strip should not touch boundary at all
2182 2181 phases.retractboundary(self, phases.draft, added)
2183 2182
2184 2183 # make changelog see real files again
2185 2184 cl.finalize(trp)
2186 2185
2187 2186 tr.close()
2188 2187
2189 2188 if changesets > 0:
2190 2189 def runhooks():
2191 2190 # forcefully update the on-disk branch cache
2192 2191 self.ui.debug("updating the branch cache\n")
2193 2192 self.updatebranchcache()
2194 2193 self.hook("changegroup", node=hex(cl.node(clstart)),
2195 2194 source=srctype, url=url)
2196 2195
2197 2196 for n in added:
2198 2197 self.hook("incoming", node=hex(n), source=srctype,
2199 2198 url=url)
2200 2199 self._afterlock(runhooks)
2201 2200
2202 2201 finally:
2203 2202 tr.release()
2204 2203 # never return 0 here:
2205 2204 if dh < 0:
2206 2205 return dh - 1
2207 2206 else:
2208 2207 return dh + 1
2209 2208
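# Editorial sketch (not part of this changeset): the return value maps
# the head-count delta 'dh' onto a nonzero integer, keeping 0 reserved
# for "nothing changed or no source". A hypothetical helper equivalent
# to the branch above:
#
#     def _retcode(dh):
#         return dh - 1 if dh < 0 else dh + 1
#
#     _retcode(0)     # ->  1   head count unchanged
#     _retcode(2)     # ->  3   two heads added
#     _retcode(-1)    # -> -2   one head removed
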
2210 2209 def stream_in(self, remote, requirements):
2211 2210 lock = self.lock()
2212 2211 try:
2213 2212 fp = remote.stream_out()
2214 2213 l = fp.readline()
2215 2214 try:
2216 2215 resp = int(l)
2217 2216 except ValueError:
2218 2217 raise error.ResponseError(
2219 2218 _('Unexpected response from remote server:'), l)
2220 2219 if resp == 1:
2221 2220 raise util.Abort(_('operation forbidden by server'))
2222 2221 elif resp == 2:
2223 2222 raise util.Abort(_('locking the remote repository failed'))
2224 2223 elif resp != 0:
2225 2224 raise util.Abort(_('the server sent an unknown error code'))
2226 2225 self.ui.status(_('streaming all changes\n'))
2227 2226 l = fp.readline()
2228 2227 try:
2229 2228 total_files, total_bytes = map(int, l.split(' ', 1))
2230 2229 except (ValueError, TypeError):
2231 2230 raise error.ResponseError(
2232 2231 _('Unexpected response from remote server:'), l)
2233 2232 self.ui.status(_('%d files to transfer, %s of data\n') %
2234 2233 (total_files, util.bytecount(total_bytes)))
2235 2234 start = time.time()
2236 2235 for i in xrange(total_files):
2237 2236 # XXX doesn't support '\n' or '\r' in filenames
2238 2237 l = fp.readline()
2239 2238 try:
2240 2239 name, size = l.split('\0', 1)
2241 2240 size = int(size)
2242 2241 except (ValueError, TypeError):
2243 2242 raise error.ResponseError(
2244 2243 _('Unexpected response from remote server:'), l)
2245 2244 if self.ui.debugflag:
2246 2245 self.ui.debug('adding %s (%s)\n' %
2247 2246 (name, util.bytecount(size)))
2248 2247 # for backwards compat, name was partially encoded
2249 2248 ofp = self.sopener(store.decodedir(name), 'w')
2250 2249 for chunk in util.filechunkiter(fp, limit=size):
2251 2250 ofp.write(chunk)
2252 2251 ofp.close()
2253 2252 elapsed = time.time() - start
2254 2253 if elapsed <= 0:
2255 2254 elapsed = 0.001
2256 2255 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2257 2256 (util.bytecount(total_bytes), elapsed,
2258 2257 util.bytecount(total_bytes / elapsed)))
2259 2258
2260 2259 # new requirements = old non-format requirements + new format-related
2261 2260 # requirements from the streamed-in repository
2262 2261 requirements.update(set(self.requirements) - self.supportedformats)
2263 2262 self._applyrequirements(requirements)
2264 2263 self._writerequirements()
2265 2264
2266 2265 self.invalidate()
2267 2266 return len(self.heads()) + 1
2268 2267 finally:
2269 2268 lock.release()
2270 2269
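# Editorial sketch (the byte layout as parsed by stream_in() above; an
# inference from this reader, not a quoted protocol spec):
#
#     <resp>\n                          # 0 ok, 1 forbidden, 2 lock failed
#     <total_files> <total_bytes>\n
#     repeated total_files times:
#         <store path>\0<size>\n        # names may not contain \n or \r
#         <size bytes of raw revlog data>
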
2271 2270 def clone(self, remote, heads=[], stream=False):
2272 2271 '''clone remote repository.
2273 2272
2274 2273 keyword arguments:
2275 2274 heads: list of revs to clone (forces use of pull)
2276 2275 stream: use streaming clone if possible'''
2277 2276
2278 2277 # now, all clients that can request uncompressed clones can
2279 2278 # read repo formats supported by all servers that can serve
2280 2279 # them.
2281 2280
2282 2281 # if revlog format changes, client will have to check version
2283 2282 # and format flags on "stream" capability, and use
2284 2283 # uncompressed only if compatible.
2285 2284
2286 2285 if not stream:
2287 2286 # if the server explicitly prefers to stream (for fast LANs)
2288 2287 stream = remote.capable('stream-preferred')
2289 2288
2290 2289 if stream and not heads:
2291 2290 # 'stream' means remote revlog format is revlogv1 only
2292 2291 if remote.capable('stream'):
2293 2292 return self.stream_in(remote, set(('revlogv1',)))
2294 2293 # otherwise, 'streamreqs' contains the remote revlog format
2295 2294 streamreqs = remote.capable('streamreqs')
2296 2295 if streamreqs:
2297 2296 streamreqs = set(streamreqs.split(','))
2298 2297 # if we support it, stream in and adjust our requirements
2299 2298 if not streamreqs - self.supportedformats:
2300 2299 return self.stream_in(remote, streamreqs)
2301 2300 return self.pull(remote, heads)
2302 2301
2303 2302 def pushkey(self, namespace, key, old, new):
2304 2303 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2305 2304 old=old, new=new)
2306 2305 ret = pushkey.push(self, namespace, key, old, new)
2307 2306 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2308 2307 ret=ret)
2309 2308 return ret
2310 2309
2311 2310 def listkeys(self, namespace):
2312 2311 self.hook('prelistkeys', throw=True, namespace=namespace)
2313 2312 values = pushkey.list(self, namespace)
2314 2313 self.hook('listkeys', namespace=namespace, values=values)
2315 2314 return values
2316 2315
2317 2316 def debugwireargs(self, one, two, three=None, four=None, five=None):
2318 2317 '''used to test argument passing over the wire'''
2319 2318 return "%s %s %s %s %s" % (one, two, three, four, five)
2320 2319
2321 2320 def savecommitmessage(self, text):
2322 2321 fp = self.opener('last-message.txt', 'wb')
2323 2322 try:
2324 2323 fp.write(text)
2325 2324 finally:
2326 2325 fp.close()
2327 2326 return self.pathto(fp.name[len(self.root)+1:])
2328 2327
2329 2328 # used to avoid circular references so destructors work
2330 2329 def aftertrans(files):
2331 2330 renamefiles = [tuple(t) for t in files]
2332 2331 def a():
2333 2332 for src, dest in renamefiles:
2334 2333 try:
2335 2334 util.rename(src, dest)
2336 2335 except OSError: # journal file does not yet exist
2337 2336 pass
2338 2337 return a
2339 2338
2340 2339 def undoname(fn):
2341 2340 base, name = os.path.split(fn)
2342 2341 assert name.startswith('journal')
2343 2342 return os.path.join(base, name.replace('journal', 'undo', 1))
2344 2343
2345 2344 def instance(ui, path, create):
2346 2345 return localrepository(ui, util.urllocalpath(path), create)
2347 2346
2348 2347 def islocal(path):
2349 2348 return True
@@ -1,343 +1,345 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms of the
10 10 GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase are described below;
22 22 here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not permanent and
25 25 leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered, so they
28 28 can be considered from lowest to highest. The default, lowest phase is 'public'
29 29 - this is the normal phase of existing changesets. A child changeset cannot be
30 30 in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 local commits are draft by default
40 40
41 41 Phase movement and exchange
42 42 ============================
43 43
44 44 Phase data are exchanged by pushkey on pull and push. Some servers have a
45 45 publish option set; we call them publishing servers. Pushing a draft
46 46 changeset to such a server makes it public.
47 47
48 48 A small list of facts/rules defines the exchange of phases:
49 49
50 50 * old client never changes server states
51 51 * pull never changes server states
52 52 * csets from publishing and old servers are seen as public by the client
53 53
54 54 * Any secret changeset seen in another repository is lowered to at least draft
55 55
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase exchange:
58 58
59 59 server
60 60 old publish non-publish
61 61 N X N D P N D P
62 62 old client
63 63 pull
64 64 N - X/X - X/D X/P - X/D X/P
65 65 X - X/X - X/D X/P - X/D X/P
66 66 push
67 67 X X/X X/X X/P X/P X/P X/D X/D X/P
68 68 new client
69 69 pull
70 70 N - P/X - P/D P/P - D/D P/P
71 71 D - P/X - P/D P/P - D/D P/P
72 72 P - P/X - P/D P/P - P/D P/P
73 73 push
74 74 D P/X P/X P/P P/P P/P D/D D/D P/P
75 75 P P/X P/X P/P P/P P/P P/P P/P P/P
76 76
77 77 Legend:
78 78
79 79 A/B = final state on client / state on server
80 80
81 81 * N = new/not present,
82 82 * P = public,
83 83 * D = draft,
84 84 * X = not tracked (ie: the old client or server has no internal way of
85 85 recording the phase.)
86 86
87 87 passive = only pushes
88 88
89 89
90 90 A cell here can be read like this:
91 91
92 92 "When a new client pushes a draft changeset (D) to a publishing server
93 93 where it's not present (N), it's marked public on both sides (P/P)."
94 94
95 95 Note: old clients behave as a publishing server with draft-only content
96 96 - other people see it as public
97 97 - content is pushed as draft
98 98
99 99 """
100 100
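# Editorial sketch (not part of this module): phases are small ordered
# integers, so "a child changeset cannot be in a lower phase than its
# parents" reduces to taking a max() over the parents when a phase has
# to be picked for a new changeset. A hypothetical helper:
#
#     public, draft, secret = 0, 1, 2
#
#     def childphase(parentphases, wanted):
#         # clamp the requested phase to the parents' maximum
#         return max([wanted] + list(parentphases))
#
#     childphase([public, public], draft)   # -> 1 (draft)
#     childphase([secret], draft)           # -> 2 (secret, cannot lower)
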
101 101 import errno, error # 'error' added: error.ConfigError is used in newcommitphase()
102 102 from node import nullid, bin, hex, short
103 103 from i18n import _
104 104
105 105 allphases = public, draft, secret = range(3)
106 106 trackedphases = allphases[1:]
107 107 phasenames = ['public', 'draft', 'secret']
108 108
109 def _filterunknown(ui, changelog, phaseroots):
110 """remove unknown nodes from the phase boundary
111
112 Nothing is lost as unknown nodes only hold data for their descendants
113 """
114 updated = False
115 nodemap = changelog.nodemap # to filter unknown nodes
116 for phase, nodes in enumerate(phaseroots):
117 missing = [node for node in nodes if node not in nodemap]
118 if missing:
119 for mnode in missing:
120 ui.debug(
121 'removing unknown node %s from %i-phase boundary\n'
122 % (short(mnode), phase))
123 nodes.symmetric_difference_update(missing)
124 updated = True
125 return updated
126
109 127 def readroots(repo):
110 128 """Read phase roots from disk"""
111 129 roots = [set() for i in allphases]
112 130 try:
113 131 f = repo.sopener('phaseroots')
114 132 try:
115 133 for line in f:
116 134 phase, nh = line.split()
117 135 roots[int(phase)].add(bin(nh))
118 136 finally:
119 137 f.close()
120 138 except IOError, inst:
121 139 if inst.errno != errno.ENOENT:
122 140 raise
123 141 for f in repo._phasedefaults:
124 142 roots = f(repo, roots)
125 143 repo._dirtyphases = True
144 if _filterunknown(repo.ui, repo.changelog, roots):
145 repo._dirtyphases = True
126 146 return roots
127 147
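# Editorial note: the 'phaseroots' file parsed above is plain text, one
# "<phase> <hex node>" pair per line (see writeroots() below), e.g.
#
#     1 c3a4f03cc9a78c11086cb57bd82cf3ae752f9803
#     2 0d87f79ff8336337dc9d0bbf1dd51f6a305b4da5
#
# (hashes invented for illustration: one draft root, one secret root).
# Public needs no entries: it is the lowest, default phase, so a missing
# or empty file means the whole repository is public.
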
128 148 def writeroots(repo):
129 149 """Write phase roots to disk"""
130 150 f = repo.sopener('phaseroots', 'w', atomictemp=True)
131 151 try:
132 152 for phase, roots in enumerate(repo._phaseroots):
133 153 for h in roots:
134 154 f.write('%i %s\n' % (phase, hex(h)))
135 155 repo._dirtyphases = False
136 156 finally:
137 157 f.close()
138 158
139 def filterunknown(repo, phaseroots=None):
140 """remove unknown nodes from the phase boundary
141
142 no data is lost as unknown nodes only hold data for their descendants
143 """
144 if phaseroots is None:
145 phaseroots = repo._phaseroots
146 nodemap = repo.changelog.nodemap # to filter unknown nodes
147 for phase, nodes in enumerate(phaseroots):
148 missing = [node for node in nodes if node not in nodemap]
149 if missing:
150 for mnode in missing:
151 repo.ui.debug(
152 'removing unknown node %s from %i-phase boundary\n'
153 % (short(mnode), phase))
154 nodes.symmetric_difference_update(missing)
155 repo._dirtyphases = True
156
157 159 def advanceboundary(repo, targetphase, nodes):
158 160 """Add nodes to a phase changing other nodes phases if necessary.
159 161
160 162 This function moves the boundary *forward*: all nodes are set
161 163 in the target phase or kept in a *lower* phase.
162 164 
163 165 Simplifies the boundary to contain phase roots only."""
164 166 delroots = [] # set of roots deleted on this pass
165 167 for phase in xrange(targetphase + 1, len(allphases)):
166 168 # filter nodes that are not in a compatible phase already
167 169 # XXX rev phase cache might have been invalidated by a previous loop
168 170 # XXX we need to be smarter here
169 171 nodes = [n for n in nodes if repo[n].phase() >= phase]
170 172 if not nodes:
171 173 break # no roots to move anymore
172 174 roots = repo._phaseroots[phase]
173 175 olds = roots.copy()
174 176 ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
175 177 roots.clear()
176 178 roots.update(ctx.node() for ctx in ctxs)
177 179 if olds != roots:
178 180 # invalidate cache (we probably could be smarter here)
179 181 if '_phaserev' in vars(repo):
180 182 del repo._phaserev
181 183 repo._dirtyphases = True
182 184 # some roots may need to be declared for lower phases
183 185 delroots.extend(olds - roots)
184 186 # declare deleted root in the target phase
185 187 if targetphase != 0:
186 188 retractboundary(repo, targetphase, delroots)
187 189
188 190
189 191 def retractboundary(repo, targetphase, nodes):
190 192 """Set nodes back to a phase changing other nodes phases if necessary.
191 193
192 194 This function moves the boundary *backward*: all nodes are set
193 195 in the target phase or kept in a *higher* phase.
194 196 
195 197 Simplifies the boundary to contain phase roots only."""
196 198 currentroots = repo._phaseroots[targetphase]
197 199 newroots = [n for n in nodes if repo[n].phase() < targetphase]
198 200 if newroots:
199 201 currentroots.update(newroots)
200 202 ctxs = repo.set('roots(%ln::)', currentroots)
201 203 currentroots.intersection_update(ctx.node() for ctx in ctxs)
202 204 if '_phaserev' in vars(repo):
203 205 del repo._phaserev
204 206 repo._dirtyphases = True
205 207
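# Editorial sketch contrasting the two moves on a linear history
# A -> B -> C whose only draft root is A (so A, B and C are draft):
#
#     advanceboundary(repo, public, [B])
#         # A and B become public; the draft root moves forward to C
#     retractboundary(repo, draft, [B])
#         # B and its descendants are draft again; the root moves back to B
#
# Both functions store roots only and re-simplify them with a
# 'roots(...)' revset, so the boundary stays minimal after every move.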
206 208
207 209 def listphases(repo):
208 210 """List phase roots for serialisation over pushkey"""
209 211 keys = {}
210 212 value = '%i' % draft
211 213 for root in repo._phaseroots[draft]:
212 214 keys[hex(root)] = value
213 215
214 216 if repo.ui.configbool('phases', 'publish', True):
215 217 # Add extra data to let the remote know we are a publishing repo.
216 218 # Publishing repos can't just pretend they are old repos. When pushing to
217 219 # a publishing repo, the client still needs to push phase boundaries.
218 220 #
219 221 # A push does not only push changesets. It also pushes phase data. New
220 222 # phase data may apply to common changesets which won't be pushed (as they
221 223 # are common). Here is a very simple example:
222 224 #
223 225 # 1) repo A pushes changeset X as draft to repo B
224 226 # 2) repo B makes changeset X public
225 227 # 3) repo B pushes to repo A. X is not pushed, but the fact that X is now
226 228 #    public should be.
227 229 #
228 230 # The server can't handle it on its own as it has no idea of the client's
229 231 # phase data.
230 232 keys['publishing'] = 'True'
231 233 return keys
232 234
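# Editorial note: the pushkey payload built above is a flat str -> str
# mapping, e.g. for a publishing repository with a single draft root:
#
#     {'<40-digit hex of the draft root>': '1', 'publishing': 'True'}
#
# analyzeremotephases() below consumes exactly this shape and skips the
# 'publishing' entry.
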
233 235 def pushphase(repo, nhex, oldphasestr, newphasestr):
234 236 """Change the phase of a node as requested over pushkey"""
235 237 lock = repo.lock()
236 238 try:
237 239 currentphase = repo[nhex].phase()
238 240 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
239 241 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
240 242 if currentphase == oldphase and newphase < oldphase:
241 243 advanceboundary(repo, newphase, [bin(nhex)])
242 244 return 1
243 245 elif currentphase == newphase:
244 246 # raced, but got correct result
245 247 return 1
246 248 else:
247 249 return 0
248 250 finally:
249 251 lock.release()
250 252
251 253 def visibleheads(repo):
252 254 """return the set of visible heads of this repo"""
253 255 # XXX we want a cache on this
254 256 sroots = repo._phaseroots[secret]
255 257 if sroots:
256 258 # XXX very slow revset. storing heads or secret "boundary" would help.
257 259 revset = repo.set('heads(not (%ln::))', sroots)
258 260
259 261 vheads = [ctx.node() for ctx in revset]
260 262 if not vheads:
261 263 vheads.append(nullid)
262 264 else:
263 265 vheads = repo.heads()
264 266 return vheads
265 267
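# Editorial note: 'heads(not (%ln::))' above drops every secret root and
# all of its descendants, then takes the heads of what remains. On
# A -> B -> C with C a secret root, the visible heads are heads({A, B}),
# i.e. [B]; if everything is secret the revset is empty, hence the
# nullid fallback.
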
266 268 def visiblebranchmap(repo):
267 269 """return a branchmap for the visible set"""
268 270 # XXX Recomputing this data on the fly is very slow. We should build a
269 271 # XXX cached version while computing the standard branchmap version.
270 272 sroots = repo._phaseroots[secret]
271 273 if sroots:
272 274 vbranchmap = {}
273 275 for branch, nodes in repo.branchmap().iteritems():
274 276 # search for secret heads.
275 277 for n in nodes:
276 278 if repo[n].phase() >= secret:
277 279 nodes = None
278 280 break
279 281 # if secret heads were found we must compute them again
280 282 if nodes is None:
281 283 s = repo.set('heads(branch(%s) - secret())', branch)
282 284 nodes = [c.node() for c in s]
283 285 vbranchmap[branch] = nodes
284 286 else:
285 287 vbranchmap = repo.branchmap()
286 288 return vbranchmap
287 289
288 290 def analyzeremotephases(repo, subset, roots):
289 291 """Compute phase heads and roots in a subset of nodes from a root dict
290 292 
291 293 * subset is the heads of the subset
292 294 * roots is a {<nodeid> => phase} mapping. keys and values are strings.
293 295 
294 296 Accepts unknown elements as input
295 297 """
296 298 # build list from dictionary
297 299 draftroots = []
298 300 nodemap = repo.changelog.nodemap # to filter unknown nodes
299 301 for nhex, phase in roots.iteritems():
300 302 if nhex == 'publishing': # ignore data related to publish option
301 303 continue
302 304 node = bin(nhex)
303 305 phase = int(phase)
304 306 if phase == 0:
305 307 if node != nullid:
306 308 repo.ui.warn(_('ignoring inconsistent public root'
307 309 ' from remote: %s\n') % nhex)
308 310 elif phase == 1:
309 311 if node in nodemap:
310 312 draftroots.append(node)
311 313 else:
312 314 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
313 315 % (phase, nhex))
314 316 # compute heads
315 317 publicheads = newheads(repo, subset, draftroots)
316 318 return publicheads, draftroots
317 319
318 320 def newheads(repo, heads, roots):
319 321 """compute the new heads of a subset minus another
320 322 
321 323 * `heads`: defines the first subset
322 324 * `roots`: defines the second, which we subtract from the first"""
323 325 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
324 326 heads, roots, roots, heads)
325 327 return [c.node() for c in revset]
326 328
327 329
328 330 def newcommitphase(ui):
329 331 """helper to get the target phase of a new commit
330 332 
331 333 Handles all possible values for the phases.new-commit option.
332 334
333 335 """
334 336 v = ui.config('phases', 'new-commit', draft)
335 337 try:
336 338 return phasenames.index(v)
337 339 except ValueError:
338 340 try:
339 341 return int(v)
340 342 except ValueError:
341 343 msg = _("phases.new-commit: not a valid phase name ('%s')")
342 344 raise error.ConfigError(msg % v)
343 345
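# Editorial note: newcommitphase() accepts a phase name or a raw integer,
# so the following two (hypothetical) hgrc spellings are equivalent:
#
#     [phases]
#     new-commit = secret
#
#     [phases]
#     new-commit = 2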