##// END OF EJS Templates
phases: allow phase name in phases.new-commit settings...
Pierre-Yves David -
r16030:30840667 stable
parent child Browse files
Show More
@@ -1,2311 +1,2310 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
class localrepository(repo.repository):
    # Peer capabilities advertised by this repository over the wire protocol.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # Requirements that change how revlog data is stored on disk.
    supportedformats = set(('revlogv1', 'generaldelta'))
    # Full set of repository requirements this class can open.
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
28 28
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at path, creating it first if create is true.

        baseui is copied so per-repository configuration (.hg/hgrc) does
        not leak into the caller's ui. Raises error.RepoError if the
        repository is missing (or, with create, already exists).
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()
        self._dirtyphases = False
        # A list of callbacks to shape the phases if no on-disk data is found.
        # Callbacks are of the form: func(repo, roots) --> processed roots.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        try:
            # load the per-repository configuration and any extensions it
            # enables; a missing .hg/hgrc is not an error
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    # fncache requires store; dotencode requires fncache
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.opener.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.opener, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: legacy repository, no requirements
                requirements = set()

        # honor .hg/sharedpath (created by 'hg share') if present
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        # lazily-populated caches; see updatebranchcache()
        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        # weak references to the current transaction / store lock / wlock
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}
117 117
118 118 def _applyrequirements(self, requirements):
119 119 self.requirements = requirements
120 120 openerreqs = set(('revlogv1', 'generaldelta'))
121 121 self.sopener.options = dict((r, 1) for r in requirements
122 122 if r in openerreqs)
123 123
    def _writerequirements(self):
        """Persist self.requirements to .hg/requires, one per line."""
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()
129 129
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path is an absolute filesystem path; returns True only when the
        working copy's .hgsub state declares it (or a prefix of it) as a
        subrepository.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path itself is a declared subrepository
                    return True
                else:
                    # path lies inside subrepo 'prefix'; ask it recursively
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter prefix
                parts.pop()
        return False
167 167
    @filecache('bookmarks')
    def _bookmarks(self):
        # map of bookmark name -> node, reloaded when .hg/bookmarks changes
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark (or None), tracked via
        # .hg/bookmarks.current
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        # persist the in-memory bookmark map back to disk
        bookmarks.write(self)
178 178
    @filecache('phaseroots', True)
    def _phaseroots(self):
        """Per-phase root sets, loaded from the store's phaseroots file."""
        # reading fresh data from disk means nothing is dirty yet
        self._dirtyphases = False
        phaseroots = phases.readroots(self)
        # drop roots whose nodes no longer exist (e.g. after a strip)
        phases.filterunknown(self, phaseroots)
        return phaseroots
185 185
    @propertycache
    def _phaserev(self):
        """List mapping revision number -> phase, defaulting to public."""
        cache = [phases.public] * len(self)
        for phase in phases.trackedphases:
            roots = map(self.changelog.rev, self._phaseroots[phase])
            if roots:
                # a root and everything descending from it share its phase
                for rev in roots:
                    cache[rev] = phase
                for rev in self.changelog.descendants(*roots):
                    cache[rev] = phase
        return cache
197 197
    @filecache('00changelog.i', True)
    def changelog(self):
        """The changelog revlog, including pending data inside a hook."""
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            # a transaction is in progress and hooks should see the
            # not-yet-committed revisions from 00changelog.i.a
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
206 206
    @filecache('00manifest.i', True)
    def manifest(self):
        """The manifest revlog for this repository's store."""
        return manifest.manifest(self.sopener)
210 210
    @filecache('dirstate')
    def dirstate(self):
        """The working directory dirstate, with parent validation.

        Unknown working directory parents (e.g. after a strip) are
        replaced by nullid, warning once.
        """
        warned = [0]  # mutable flag so the closure can record the warning
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
226 226
227 227 def __getitem__(self, changeid):
228 228 if changeid is None:
229 229 return context.workingctx(self)
230 230 return context.changectx(self, changeid)
231 231
232 232 def __contains__(self, changeid):
233 233 try:
234 234 return bool(self.lookup(changeid))
235 235 except error.RepoLookupError:
236 236 return False
237 237
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions in the repository
        return len(self.changelog)
243 243
    def __iter__(self):
        # yield every revision number, in increasing order
        for i in xrange(len(self)):
            yield i
247 247
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset

        args are interpolated into expr via revset.formatspec.'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, range(len(self)))]
253 253
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
261 261
    def url(self):
        # canonical URL form of a local repository
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        # run the named hook; with throw=True a failing hook raises
        return hook.hook(self.ui, self, name, throw, **args)

    # characters that may not appear in a tag name
    tag_disallowed = ':\r\n'
269 269
    # NOTE(review): the mutable default extra={} is shared across calls;
    # it appears to be read-only here but confirm commit() never mutates it.
    def _tag(self, names, node, message, local, user, date, extra={}):
        """Low-level tagging: write names -> node to localtags or .hgtags.

        names may be a single string or a sequence. For non-local tags a
        changeset committing .hgtags is created and its node returned;
        local tagging returns None. Runs 'pretag' and 'tag' hooks.
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            # a pretag hook may veto the tag by raising (throw=True)
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines, recording the old value first when a tag
            # is being moved so the tag history stays reconstructible
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
341 341
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to auto-commit on top of uncommitted .hgtags edits
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
371 371
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by nodetags()/tagslist()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
393 393
    def tags(self):
        '''return a mapping of tag to node'''
        return self._tagscache.tags
397 397
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is implicit and always present
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
435 435
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
446 446
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            # build once and memoize on the tags cache object
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist
457 457
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # invert the tag->node map once and memoize it
            nodetagscache = {}
            for t, n in self.tags().iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
468 468
469 469 def nodebookmarks(self, node):
470 470 marks = []
471 471 for bookmark, n in self._bookmarks.iteritems():
472 472 if n == node:
473 473 marks.append(bookmark)
474 474 return sorted(marks)
475 475
    def _branchtags(self, partial, lrev):
        """Bring the branch-head cache 'partial' up to the current tip.

        lrev is the last revision already reflected in partial; newer
        revisions are folded in and the cache is written back to disk.
        """
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
485 485
    def updatebranchcache(self):
        """Ensure self._branchcache reflects the current changelog tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache is already current
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (e.g. old tip was stripped):
            # start from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial
502 502
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache
507 507
    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            # default to the last (tipmost) head, then prefer the tipmost
            # head whose changeset extra (changelog.read()[5]) does not
            # carry the 'close' marker
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt
520 520
    def _readbranchcache(self):
        """Read .hg/cache/branchheads.

        Returns (partial, last, lrev) where partial maps branch name to
        a list of head nodes and last/lrev identify the tip the cache
        was valid for. Any problem yields an empty, invalid cache.
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is not fatal; rebuild from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
549 549
    def _writebranchcache(self, branches, tip, tiprev):
        """Best-effort write of the branch-head cache; failures are
        ignored since the cache can always be rebuilt."""
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass
560 560
    def _updatebranchcache(self, partial, ctxgen):
        """Fold the changesets from ctxgen into the branch-head map
        'partial' (branch name -> list of head nodes), in place."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # anything reachable from 'latest' is not a head
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
586 586
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: integer rev, '.', 'null', 'tip', full hex/node,
        bookmark, tag, branch, then unambiguous hex prefix. Raises
        error.RepoLookupError when nothing matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
619 619
620 620 def lookupbranch(self, key, remote=None):
621 621 repo = remote or self
622 622 if key in repo.branchmap():
623 623 return key
624 624
625 625 repo = (remote and remote.local()) and remote or self
626 626 return repo[key].branch()
627 627
    def known(self, nodes):
        """Return a list of booleans: for each node, whether it is known
        here and not in the secret phase (secret changesets must not be
        advertised to peers)."""
        nm = self.changelog.nodemap
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or self._phaserev[r] >= phases.secret)
            result.append(resp)
        return result
636 636
    def local(self):
        # a local repository returns itself (remote peers return None)
        return self

    def join(self, f):
        # path of f inside .hg/
        return os.path.join(self.path, f)

    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)

    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
650 650
    def changectx(self, changeid):
        # alias for repo[changeid]
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        # current working directory, relative to the repository root
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        # translate repo-relative path f for display relative to cwd
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)

    def _link(self, f):
        # is working directory file f a symlink?
        return os.path.islink(self.wjoin(f))
674 674
675 675 def _loadfilter(self, filter):
676 676 if filter not in self.filterpats:
677 677 l = []
678 678 for pat, cmd in self.ui.configitems(filter):
679 679 if cmd == '!':
680 680 continue
681 681 mf = matchmod.match(self.root, '', [pat])
682 682 fn = None
683 683 params = cmd
684 684 for name, filterfn in self._datafilters.iteritems():
685 685 if cmd.startswith(name):
686 686 fn = filterfn
687 687 params = cmd[len(name):].lstrip()
688 688 break
689 689 if not fn:
690 690 fn = lambda s, c, **kwargs: util.filter(s, c)
691 691 # Wrap old filters not supporting keyword arguments
692 692 if not inspect.getargspec(fn)[2]:
693 693 oldfn = fn
694 694 fn = lambda s, c, **kwargs: oldfn(s, c)
695 695 l.append((mf, fn, params))
696 696 self.filterpats[filter] = l
697 697 return self.filterpats[filter]
698 698
    def _filter(self, filterpats, filename, data):
        """Run data through the first filter in filterpats whose pattern
        matches filename; return the (possibly transformed) data."""
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                # only the first matching filter applies
                break

        return data
707 707
    @propertycache
    def _encodefilterpats(self):
        # filters applied when reading from the working directory
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        # filters applied when writing to the working directory
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        # register a named data filter usable from [encode]/[decode]
        self._datafilters[name] = filter
718 718
    def wread(self, filename):
        """Read filename from the working directory, applying encode
        filters; symlinks yield their target path."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory, applying
        decode filters and honoring flags ('l' symlink, 'x' executable)."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        # apply decode filters without touching the filesystem
        return self._filter(self._decodefilterpats, filename, data)
737 737
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by desc.

        Returns the transaction object; callers must close/release it.
        Raises error.RepoError if an abandoned journal exists.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # reuse the transaction already in progress
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        # on success the journal files become the undo files
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
757 757
    def _writejournal(self, desc):
        """Snapshot pre-transaction state (dirstate, branch, bookmarks,
        phaseroots) into journal.* files; return their paths."""
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # journal.desc: "<old repo length>\n<transaction description>\n"
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')
        phasesname = self.sjoin('phaseroots')
        if os.path.exists(phasesname):
            util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
        else:
            self.sopener.write('journal.phaseroots', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))
785 785
    def recover(self):
        """Roll back an interrupted transaction from the journal.

        Returns True if a journal was found and rolled back, else False.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
800 800
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction using the undo files.

        Returns 0 on success, 1 when there is nothing to roll back.
        """
        wlock = lock = None
        try:
            # take both locks: rollback touches store and working dir state
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
813 813
    def _rollback(self, dryrun, force):
        """Perform the actual rollback; caller holds lock and wlock.

        With dryrun, only report what would happen. Without force,
        refuse to roll back a commit when not checked out on it.
        """
        ui = self.ui
        try:
            # undo.desc format: "<oldlen>\n<desc>\n[<detail>\n]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # restore the dirstate only when the working directory parents
        # were removed by the rollback
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(branch)
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        self.destroyed()
        return 0
876 876
877 877 def invalidatecaches(self):
878 878 def delcache(name):
879 879 try:
880 880 delattr(self, name)
881 881 except AttributeError:
882 882 pass
883 883
884 884 delcache('_tagscache')
885 885 delcache('_phaserev')
886 886
887 887 self._branchcache = None # in UTF-8
888 888 self._branchcachetip = None
889 889
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        try:
            # deleting the filecache property forces a reload on next access
            delattr(self, 'dirstate')
        except AttributeError:
            pass
903 903
    def invalidate(self):
        """Drop all filecache-backed properties (except dirstate) and
        the derived in-memory caches."""
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()
915 915
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file lockname.

        With wait, retry (honoring the ui.timeout config, default 600s)
        after warning about the current holder; otherwise error.LockHeld
        propagates. acquirefn, if given, runs after acquisition.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
930 930
931 931 def _afterlock(self, callback):
932 932 """add a callback to the current repository lock.
933 933
934 934 The callback will be executed on lock release."""
935 935 l = self._lockref and self._lockref()
936 936 if l:
937 937 l.postrelease.append(callback)
938 938
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # reuse an already-held lock when possible (locks are reentrant)
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush buffered store writes and any pending phase-root
            # changes before the lock is actually dropped
            self.store.write()
            if self._dirtyphases:
                phases.writeroots(self)
            # refresh filecache entries; dirstate is handled by wlock()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
961 961
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # reuse an already-held lock when possible (locks are reentrant)
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # write the dirstate back and refresh its filecache entry
            # before the lock is actually dropped
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
982 982
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx: file context carrying the new data and rename metadata
        manifest1, manifest2: manifests of the first and second parent
        linkrev: changelog revision the new filelog entry will link to
        tr: the active transaction
        changelist: appended with fname when the file actually changed

        Returns the filenode to record in the new manifest (an existing
        parent node when the content is unchanged).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1062 1062
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.  NOTE: 'extra' uses a mutable default; callers must
        not mutate the default instance.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit patterns naming a
            # directory can be validated below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0],
                                     hint=_("use --subrepos for recursive commit"))

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing changed and no branch/close marker: no-op commit
            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                # point the user at the saved message before re-raising
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
1205 1205
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When *error* is true, IOErrors while committing individual files
        are fatal; otherwise a missing file (ENOENT) is treated as removed.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # unreadable file: record it as removed
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1288 1287 def destroyed(self):
1289 1288 '''Inform the repository that nodes have been destroyed.
1290 1289 Intended for use by strip and rollback, so there's a common
1291 1290 place for anything that has to be done after destroying history.'''
1292 1291 # XXX it might be nice if we could take the list of destroyed
1293 1292 # nodes, but I don't see an easy way for rollback() to do that
1294 1293
1295 1294 # Ensure the persistent tag cache is updated. Doing it now
1296 1295 # means that the tag cache only has to worry about destroyed
1297 1296 # heads immediately after a strip/rollback. That in turn
1298 1297 # guarantees that "cachetip == currenttip" (comparing both rev
1299 1298 # and node) always means no nodes have been added or destroyed.
1300 1299
1301 1300 # XXX this is suboptimal when qrefresh'ing: we strip the current
1302 1301 # head, refresh the tag cache, then immediately add a new head.
1303 1302 # But I think doing it this way is necessary for the "instant
1304 1303 # tag cache retrieval" case to work.
1305 1304 self.invalidatecaches()
1306 1305
1307 1306 def walk(self, match, node=None):
1308 1307 '''
1309 1308 walk recursively through the directory tree or a given
1310 1309 changeset, finding all files matched by the match
1311 1310 function
1312 1311 '''
1313 1312 return self[node].walk(match)
1314 1313
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean);
        the last three are only populated when the corresponding flag
        is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn about files present in the base revision
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            # whatever is left in mf1 exists only on the mf1 side
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1461 1460
1462 1461 def heads(self, start=None):
1463 1462 heads = self.changelog.heads(start)
1464 1463 # sort the output in rev descending order
1465 1464 return sorted(heads, key=self.changelog.rev, reverse=True)
1466 1465
1467 1466 def branchheads(self, branch=None, start=None, closed=False):
1468 1467 '''return a (possibly filtered) list of heads for the given branch
1469 1468
1470 1469 Heads are returned in topological order, from newest to oldest.
1471 1470 If branch is None, use the dirstate branch.
1472 1471 If start is not None, return only heads reachable from start.
1473 1472 If closed is True, return heads that are marked as closed as well.
1474 1473 '''
1475 1474 if branch is None:
1476 1475 branch = self[None].branch()
1477 1476 branches = self.branchmap()
1478 1477 if branch not in branches:
1479 1478 return []
1480 1479 # the cache returns heads ordered lowest to highest
1481 1480 bheads = list(reversed(branches[branch]))
1482 1481 if start is not None:
1483 1482 # filter out the heads that cannot be reached from startrev
1484 1483 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1485 1484 bheads = [h for h in bheads if h in fbheads]
1486 1485 if not closed:
1487 1486 bheads = [h for h in bheads if
1488 1487 ('close' not in self.changelog.read(h)[5])]
1489 1488 return bheads
1490 1489
1491 1490 def branches(self, nodes):
1492 1491 if not nodes:
1493 1492 nodes = [self.changelog.tip()]
1494 1493 b = []
1495 1494 for n in nodes:
1496 1495 t = n
1497 1496 while True:
1498 1497 p = self.changelog.parents(n)
1499 1498 if p[1] != nullid or p[0] == nullid:
1500 1499 b.append((t, n, p[0], p[1]))
1501 1500 break
1502 1501 n = p[0]
1503 1502 return b
1504 1503
1505 1504 def between(self, pairs):
1506 1505 r = []
1507 1506
1508 1507 for top, bottom in pairs:
1509 1508 n, l, i = top, [], 0
1510 1509 f = 1
1511 1510
1512 1511 while n != bottom and n != nullid:
1513 1512 p = self.changelog.parents(n)[0]
1514 1513 if i == f:
1515 1514 l.append(n)
1516 1515 f = f * 2
1517 1516 n = p
1518 1517 i += 1
1519 1518
1520 1519 r.append(l)
1521 1520
1522 1521 return r
1523 1522
    def pull(self, remote, heads=None, force=False):
        """Pull changesets (optionally limited to *heads*) from *remote*,
        then synchronize phase data with it.  Returns the result of
        addchangegroup(), or 0 when no changes were found."""
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the best changegroup protocol the remote supports
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                clstart = len(self.changelog)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(self.changelog)
                added = [self.changelog.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)
        finally:
            lock.release()

        return result
1584 1583
1585 1584 def checkpush(self, force, revs):
1586 1585 """Extensions can override this function if additional checks have
1587 1586 to be performed before pushing, or call it if they override push
1588 1587 command.
1589 1588 """
1590 1589 pass
1591 1590
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - None means nothing to push
          - 0 means HTTP error
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        # get local lock as we might write phase data
        locallock = self.lock()
        try:
            self.checkpush(force, revs)
            lock = None
            unbundle = remote.capable('unbundle')
            if not unbundle:
                lock = remote.lock()
            try:
                # discovery
                fci = discovery.findcommonincoming
                commoninc = fci(self, remote, force=force)
                common, inc, remoteheads = commoninc
                fco = discovery.findcommonoutgoing
                outgoing = fco(self, remote, onlyheads=revs,
                               commoninc=commoninc, force=force)


                if not outgoing.missing:
                    # nothing to push
                    scmutil.nochangesfound(self.ui, outgoing.excluded)
                    ret = None
                else:
                    # something to push
                    if not force:
                        discovery.checkheads(self, remote, outgoing,
                                             remoteheads, newbranch,
                                             bool(inc))

                    # create a changegroup from local
                    if revs is None and not outgoing.excluded:
                        # push everything,
                        # use the fast path, no race possible on push
                        cg = self._changegroup(outgoing.missing, 'push')
                    else:
                        cg = self.getlocalbundle('push', outgoing)

                    # apply changegroup to remote
                    if unbundle:
                        # local repo finds heads on server, finds out what
                        # revs it must push. once revs transferred, if server
                        # finds it has different heads (someone else won
                        # commit/push race), server aborts.
                        if force:
                            remoteheads = ['force']
                        # ssh: return remote's addchangegroup()
                        # http: return remote's addchangegroup() or 0 for error
                        ret = remote.unbundle(cg, remoteheads, 'push')
                    else:
                        # we return an integer indicating remote head count change
                        ret = remote.addchangegroup(cg, 'push', self.url())

                if ret:
                    # push succeeded, synchronize phases on the pushed heads
                    cheads = outgoing.missingheads
                elif revs is None:
                    # Full push failed.  Synchronize on all common heads.
                    cheads = outgoing.commonheads
                else:
                    # I want cheads = heads(::missingheads and ::commonheads)
                    # (missingheads is revs with secret changeset filtered out)
                    #
                    # This can be expressed as:
                    #     cheads = ( (missingheads and ::commonheads)
                    #              + (commonheads and ::missingheads))"
                    #              )
                    #
                    # while trying to push we already computed the following:
                    #     common = (::commonheads)
                    #     missing = ((commonheads::missingheads) - commonheads)
                    #
                    # We can pick:
                    # * missingheads part of common (::commonheads)
                    common = set(outgoing.common)
                    cheads = [node for node in revs if node in common]
                    # and
                    # * commonheads parents on missing
                    revset = self.set('%ln and parents(roots(%ln))',
                                      outgoing.commonheads,
                                      outgoing.missing)
                    cheads.extend(c.node() for c in revset)
                # even when we don't push, exchanging phase data is useful
                remotephases = remote.listkeys('phases')
                if not remotephases: # old server or public only repo
                    phases.advanceboundary(self, phases.public, cheads)
                    # don't push any phase data as there is nothing to push
                else:
                    ana = phases.analyzeremotephases(self, cheads, remotephases)
                    pheads, droots = ana
                    ### Apply remote phase on local
                    if remotephases.get('publishing', False):
                        phases.advanceboundary(self, phases.public, cheads)
                    else: # publish = False
                        phases.advanceboundary(self, phases.public, pheads)
                        phases.advanceboundary(self, phases.draft, cheads)
                    ### Apply local phase on remote

                    # Get the list of all revs draft on remote by public here.
                    # XXX Beware that revset break if droots is not strictly
                    # XXX root we may want to ensure it is but it is costly
                    outdated = self.set('heads((%ln::%ln) and public())',
                                        droots, cheads)
                    for newremotehead in outdated:
                        r = remote.pushkey('phases',
                                           newremotehead.hex(),
                                           str(phases.draft),
                                           str(phases.public))
                        if not r:
                            self.ui.warn(_('updating %s to public failed!\n')
                                         % newremotehead)
            finally:
                if lock is not None:
                    lock.release()
        finally:
            locallock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    # only advance the remote bookmark (fast-forward)
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret
1742 1741
1743 1742 def changegroupinfo(self, nodes, source):
1744 1743 if self.ui.verbose or source == 'bundle':
1745 1744 self.ui.status(_("%d changesets found\n") % len(nodes))
1746 1745 if self.ui.debugflag:
1747 1746 self.ui.debug("list of changesets:\n")
1748 1747 for node in nodes:
1749 1748 self.ui.debug("%s\n" % hex(node))
1750 1749
1751 1750 def changegroupsubset(self, bases, heads, source):
1752 1751 """Compute a changegroup consisting of all the nodes that are
1753 1752 descendants of any of the bases and ancestors of any of the heads.
1754 1753 Return a chunkbuffer object whose read() method will return
1755 1754 successive changegroup chunks.
1756 1755
1757 1756 It is fairly complex as determining which filenodes and which
1758 1757 manifest nodes need to be included for the changeset to be complete
1759 1758 is non-trivial.
1760 1759
1761 1760 Another wrinkle is doing the reverse, figuring out which changeset in
1762 1761 the changegroup a particular filenode or manifestnode belongs to.
1763 1762 """
1764 1763 cl = self.changelog
1765 1764 if not bases:
1766 1765 bases = [nullid]
1767 1766 csets, bases, heads = cl.nodesbetween(bases, heads)
1768 1767 # We assume that all ancestors of bases are known
1769 1768 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1770 1769 return self._changegroupsubset(common, csets, heads, source)
1771 1770
1772 1771 def getlocalbundle(self, source, outgoing):
1773 1772 """Like getbundle, but taking a discovery.outgoing as an argument.
1774 1773
1775 1774 This is only implemented for local repos and reuses potentially
1776 1775 precomputed sets in outgoing."""
1777 1776 if not outgoing.missing:
1778 1777 return None
1779 1778 return self._changegroupsubset(outgoing.common,
1780 1779 outgoing.missing,
1781 1780 outgoing.missingheads,
1782 1781 source)
1783 1782
1784 1783 def getbundle(self, source, heads=None, common=None):
1785 1784 """Like changegroupsubset, but returns the set difference between the
1786 1785 ancestors of heads and the ancestors common.
1787 1786
1788 1787 If heads is None, use the local heads. If common is None, use [nullid].
1789 1788
1790 1789 The nodes in common might not all be known locally due to the way the
1791 1790 current discovery protocol works.
1792 1791 """
1793 1792 cl = self.changelog
1794 1793 if common:
1795 1794 nm = cl.nodemap
1796 1795 common = [n for n in common if n in nm]
1797 1796 else:
1798 1797 common = [nullid]
1799 1798 if not heads:
1800 1799 heads = cl.heads()
1801 1800 return self.getlocalbundle(source,
1802 1801 discovery.outgoing(cl, common, heads))
1803 1802
1804 1803 def _changegroupsubset(self, commonrevs, csets, heads, source):
1805 1804
1806 1805 cl = self.changelog
1807 1806 mf = self.manifest
1808 1807 mfs = {} # needed manifests
1809 1808 fnodes = {} # needed file nodes
1810 1809 changedfiles = set()
1811 1810 fstate = ['', {}]
1812 1811 count = [0]
1813 1812
1814 1813 # can we go through the fast path ?
1815 1814 heads.sort()
1816 1815 if heads == sorted(self.heads()):
1817 1816 return self._changegroup(csets, source)
1818 1817
1819 1818 # slow path
1820 1819 self.hook('preoutgoing', throw=True, source=source)
1821 1820 self.changegroupinfo(csets, source)
1822 1821
1823 1822 # filter any nodes that claim to be part of the known set
1824 1823 def prune(revlog, missing):
1825 1824 return [n for n in missing
1826 1825 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1827 1826
1828 1827 def lookup(revlog, x):
1829 1828 if revlog == cl:
1830 1829 c = cl.read(x)
1831 1830 changedfiles.update(c[3])
1832 1831 mfs.setdefault(c[0], x)
1833 1832 count[0] += 1
1834 1833 self.ui.progress(_('bundling'), count[0],
1835 1834 unit=_('changesets'), total=len(csets))
1836 1835 return x
1837 1836 elif revlog == mf:
1838 1837 clnode = mfs[x]
1839 1838 mdata = mf.readfast(x)
1840 1839 for f in changedfiles:
1841 1840 if f in mdata:
1842 1841 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1843 1842 count[0] += 1
1844 1843 self.ui.progress(_('bundling'), count[0],
1845 1844 unit=_('manifests'), total=len(mfs))
1846 1845 return mfs[x]
1847 1846 else:
1848 1847 self.ui.progress(
1849 1848 _('bundling'), count[0], item=fstate[0],
1850 1849 unit=_('files'), total=len(changedfiles))
1851 1850 return fstate[1][x]
1852 1851
1853 1852 bundler = changegroup.bundle10(lookup)
1854 1853 reorder = self.ui.config('bundle', 'reorder', 'auto')
1855 1854 if reorder == 'auto':
1856 1855 reorder = None
1857 1856 else:
1858 1857 reorder = util.parsebool(reorder)
1859 1858
1860 1859 def gengroup():
1861 1860 # Create a changenode group generator that will call our functions
1862 1861 # back to lookup the owning changenode and collect information.
1863 1862 for chunk in cl.group(csets, bundler, reorder=reorder):
1864 1863 yield chunk
1865 1864 self.ui.progress(_('bundling'), None)
1866 1865
1867 1866 # Create a generator for the manifestnodes that calls our lookup
1868 1867 # and data collection functions back.
1869 1868 count[0] = 0
1870 1869 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1871 1870 yield chunk
1872 1871 self.ui.progress(_('bundling'), None)
1873 1872
1874 1873 mfs.clear()
1875 1874
1876 1875 # Go through all our files in order sorted by name.
1877 1876 count[0] = 0
1878 1877 for fname in sorted(changedfiles):
1879 1878 filerevlog = self.file(fname)
1880 1879 if not len(filerevlog):
1881 1880 raise util.Abort(_("empty or missing revlog for %s") % fname)
1882 1881 fstate[0] = fname
1883 1882 fstate[1] = fnodes.pop(fname, {})
1884 1883
1885 1884 nodelist = prune(filerevlog, fstate[1])
1886 1885 if nodelist:
1887 1886 count[0] += 1
1888 1887 yield bundler.fileheader(fname)
1889 1888 for chunk in filerevlog.group(nodelist, bundler, reorder):
1890 1889 yield chunk
1891 1890
1892 1891 # Signal that no more groups are left.
1893 1892 yield bundler.close()
1894 1893 self.ui.progress(_('bundling'), None)
1895 1894
1896 1895 if csets:
1897 1896 self.hook('outgoing', node=hex(csets[0]), source=source)
1898 1897
1899 1898 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1900 1899
1901 1900 def changegroup(self, basenodes, source):
1902 1901 # to avoid a race we use changegroupsubset() (issue1320)
1903 1902 return self.changegroupsubset(basenodes, self.heads(), source)
1904 1903
1905 1904 def _changegroup(self, nodes, source):
1906 1905 """Compute the changegroup of all nodes that we have that a recipient
1907 1906 doesn't. Return a chunkbuffer object whose read() method will return
1908 1907 successive changegroup chunks.
1909 1908
1910 1909 This is much easier than the previous function as we can assume that
1911 1910 the recipient has any changenode we aren't sending them.
1912 1911
1913 1912 nodes is the set of nodes to send"""
1914 1913
1915 1914 cl = self.changelog
1916 1915 mf = self.manifest
1917 1916 mfs = {}
1918 1917 changedfiles = set()
1919 1918 fstate = ['']
1920 1919 count = [0]
1921 1920
1922 1921 self.hook('preoutgoing', throw=True, source=source)
1923 1922 self.changegroupinfo(nodes, source)
1924 1923
1925 1924 revset = set([cl.rev(n) for n in nodes])
1926 1925
1927 1926 def gennodelst(log):
1928 1927 return [log.node(r) for r in log if log.linkrev(r) in revset]
1929 1928
1930 1929 def lookup(revlog, x):
1931 1930 if revlog == cl:
1932 1931 c = cl.read(x)
1933 1932 changedfiles.update(c[3])
1934 1933 mfs.setdefault(c[0], x)
1935 1934 count[0] += 1
1936 1935 self.ui.progress(_('bundling'), count[0],
1937 1936 unit=_('changesets'), total=len(nodes))
1938 1937 return x
1939 1938 elif revlog == mf:
1940 1939 count[0] += 1
1941 1940 self.ui.progress(_('bundling'), count[0],
1942 1941 unit=_('manifests'), total=len(mfs))
1943 1942 return cl.node(revlog.linkrev(revlog.rev(x)))
1944 1943 else:
1945 1944 self.ui.progress(
1946 1945 _('bundling'), count[0], item=fstate[0],
1947 1946 total=len(changedfiles), unit=_('files'))
1948 1947 return cl.node(revlog.linkrev(revlog.rev(x)))
1949 1948
1950 1949 bundler = changegroup.bundle10(lookup)
1951 1950 reorder = self.ui.config('bundle', 'reorder', 'auto')
1952 1951 if reorder == 'auto':
1953 1952 reorder = None
1954 1953 else:
1955 1954 reorder = util.parsebool(reorder)
1956 1955
1957 1956 def gengroup():
1958 1957 '''yield a sequence of changegroup chunks (strings)'''
1959 1958 # construct a list of all changed files
1960 1959
1961 1960 for chunk in cl.group(nodes, bundler, reorder=reorder):
1962 1961 yield chunk
1963 1962 self.ui.progress(_('bundling'), None)
1964 1963
1965 1964 count[0] = 0
1966 1965 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1967 1966 yield chunk
1968 1967 self.ui.progress(_('bundling'), None)
1969 1968
1970 1969 count[0] = 0
1971 1970 for fname in sorted(changedfiles):
1972 1971 filerevlog = self.file(fname)
1973 1972 if not len(filerevlog):
1974 1973 raise util.Abort(_("empty or missing revlog for %s") % fname)
1975 1974 fstate[0] = fname
1976 1975 nodelist = gennodelst(filerevlog)
1977 1976 if nodelist:
1978 1977 count[0] += 1
1979 1978 yield bundler.fileheader(fname)
1980 1979 for chunk in filerevlog.group(nodelist, bundler, reorder):
1981 1980 yield chunk
1982 1981 yield bundler.close()
1983 1982 self.ui.progress(_('bundling'), None)
1984 1983
1985 1984 if nodes:
1986 1985 self.hook('outgoing', node=hex(nodes[0]), source=source)
1987 1986
1988 1987 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1989 1988
1990 1989 def addchangegroup(self, source, srctype, url, emptyok=False):
1991 1990 """Add the changegroup returned by source.read() to this repo.
1992 1991 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1993 1992 the URL of the repo where this changegroup is coming from.
1994 1993
1995 1994 Return an integer summarizing the change to this repo:
1996 1995 - nothing changed or no source: 0
1997 1996 - more heads than before: 1+added heads (2..n)
1998 1997 - fewer heads than before: -1-removed heads (-2..-n)
1999 1998 - number of heads stays the same: 1
2000 1999 """
2001 2000 def csmap(x):
2002 2001 self.ui.debug("add changeset %s\n" % short(x))
2003 2002 return len(cl)
2004 2003
2005 2004 def revmap(x):
2006 2005 return cl.rev(x)
2007 2006
2008 2007 if not source:
2009 2008 return 0
2010 2009
2011 2010 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2012 2011
2013 2012 changesets = files = revisions = 0
2014 2013 efiles = set()
2015 2014
2016 2015 # write changelog data to temp files so concurrent readers will not see
2017 2016 # inconsistent view
2018 2017 cl = self.changelog
2019 2018 cl.delayupdate()
2020 2019 oldheads = cl.heads()
2021 2020
2022 2021 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2023 2022 try:
2024 2023 trp = weakref.proxy(tr)
2025 2024 # pull off the changeset group
2026 2025 self.ui.status(_("adding changesets\n"))
2027 2026 clstart = len(cl)
2028 2027 class prog(object):
2029 2028 step = _('changesets')
2030 2029 count = 1
2031 2030 ui = self.ui
2032 2031 total = None
2033 2032 def __call__(self):
2034 2033 self.ui.progress(self.step, self.count, unit=_('chunks'),
2035 2034 total=self.total)
2036 2035 self.count += 1
2037 2036 pr = prog()
2038 2037 source.callback = pr
2039 2038
2040 2039 source.changelogheader()
2041 2040 srccontent = cl.addgroup(source, csmap, trp)
2042 2041 if not (srccontent or emptyok):
2043 2042 raise util.Abort(_("received changelog group is empty"))
2044 2043 clend = len(cl)
2045 2044 changesets = clend - clstart
2046 2045 for c in xrange(clstart, clend):
2047 2046 efiles.update(self[c].files())
2048 2047 efiles = len(efiles)
2049 2048 self.ui.progress(_('changesets'), None)
2050 2049
2051 2050 # pull off the manifest group
2052 2051 self.ui.status(_("adding manifests\n"))
2053 2052 pr.step = _('manifests')
2054 2053 pr.count = 1
2055 2054 pr.total = changesets # manifests <= changesets
2056 2055 # no need to check for empty manifest group here:
2057 2056 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2058 2057 # no new manifest will be created and the manifest group will
2059 2058 # be empty during the pull
2060 2059 source.manifestheader()
2061 2060 self.manifest.addgroup(source, revmap, trp)
2062 2061 self.ui.progress(_('manifests'), None)
2063 2062
2064 2063 needfiles = {}
2065 2064 if self.ui.configbool('server', 'validate', default=False):
2066 2065 # validate incoming csets have their manifests
2067 2066 for cset in xrange(clstart, clend):
2068 2067 mfest = self.changelog.read(self.changelog.node(cset))[0]
2069 2068 mfest = self.manifest.readdelta(mfest)
2070 2069 # store file nodes we must see
2071 2070 for f, n in mfest.iteritems():
2072 2071 needfiles.setdefault(f, set()).add(n)
2073 2072
2074 2073 # process the files
2075 2074 self.ui.status(_("adding file changes\n"))
2076 2075 pr.step = _('files')
2077 2076 pr.count = 1
2078 2077 pr.total = efiles
2079 2078 source.callback = None
2080 2079
2081 2080 while True:
2082 2081 chunkdata = source.filelogheader()
2083 2082 if not chunkdata:
2084 2083 break
2085 2084 f = chunkdata["filename"]
2086 2085 self.ui.debug("adding %s revisions\n" % f)
2087 2086 pr()
2088 2087 fl = self.file(f)
2089 2088 o = len(fl)
2090 2089 if not fl.addgroup(source, revmap, trp):
2091 2090 raise util.Abort(_("received file revlog group is empty"))
2092 2091 revisions += len(fl) - o
2093 2092 files += 1
2094 2093 if f in needfiles:
2095 2094 needs = needfiles[f]
2096 2095 for new in xrange(o, len(fl)):
2097 2096 n = fl.node(new)
2098 2097 if n in needs:
2099 2098 needs.remove(n)
2100 2099 if not needs:
2101 2100 del needfiles[f]
2102 2101 self.ui.progress(_('files'), None)
2103 2102
2104 2103 for f, needs in needfiles.iteritems():
2105 2104 fl = self.file(f)
2106 2105 for n in needs:
2107 2106 try:
2108 2107 fl.rev(n)
2109 2108 except error.LookupError:
2110 2109 raise util.Abort(
2111 2110 _('missing file data for %s:%s - run hg verify') %
2112 2111 (f, hex(n)))
2113 2112
2114 2113 dh = 0
2115 2114 if oldheads:
2116 2115 heads = cl.heads()
2117 2116 dh = len(heads) - len(oldheads)
2118 2117 for h in heads:
2119 2118 if h not in oldheads and 'close' in self[h].extra():
2120 2119 dh -= 1
2121 2120 htext = ""
2122 2121 if dh:
2123 2122 htext = _(" (%+d heads)") % dh
2124 2123
2125 2124 self.ui.status(_("added %d changesets"
2126 2125 " with %d changes to %d files%s\n")
2127 2126 % (changesets, revisions, files, htext))
2128 2127
2129 2128 if changesets > 0:
2130 2129 p = lambda: cl.writepending() and self.root or ""
2131 2130 self.hook('pretxnchangegroup', throw=True,
2132 2131 node=hex(cl.node(clstart)), source=srctype,
2133 2132 url=url, pending=p)
2134 2133
2135 2134 added = [cl.node(r) for r in xrange(clstart, clend)]
2136 2135 publishing = self.ui.configbool('phases', 'publish', True)
2137 2136 if srctype == 'push':
2138 2137 # Old server can not push the boundary themself.
2139 2138 # New server won't push the boundary if changeset already
2140 2139 # existed locally as secrete
2141 2140 #
2142 2141 # We should not use added here but the list of all change in
2143 2142 # the bundle
2144 2143 if publishing:
2145 2144 phases.advanceboundary(self, phases.public, srccontent)
2146 2145 else:
2147 2146 phases.advanceboundary(self, phases.draft, srccontent)
2148 2147 phases.retractboundary(self, phases.draft, added)
2149 2148 elif srctype != 'strip':
2150 2149 # publishing only alter behavior during push
2151 2150 #
2152 2151 # strip should not touch boundary at all
2153 2152 phases.retractboundary(self, phases.draft, added)
2154 2153
2155 2154 # make changelog see real files again
2156 2155 cl.finalize(trp)
2157 2156
2158 2157 tr.close()
2159 2158
2160 2159 if changesets > 0:
2161 2160 def runhooks():
2162 2161 # forcefully update the on-disk branch cache
2163 2162 self.ui.debug("updating the branch cache\n")
2164 2163 self.updatebranchcache()
2165 2164 self.hook("changegroup", node=hex(cl.node(clstart)),
2166 2165 source=srctype, url=url)
2167 2166
2168 2167 for n in added:
2169 2168 self.hook("incoming", node=hex(n), source=srctype,
2170 2169 url=url)
2171 2170 self._afterlock(runhooks)
2172 2171
2173 2172 finally:
2174 2173 tr.release()
2175 2174 # never return 0 here:
2176 2175 if dh < 0:
2177 2176 return dh - 1
2178 2177 else:
2179 2178 return dh + 1
2180 2179
2181 2180 def stream_in(self, remote, requirements):
2182 2181 lock = self.lock()
2183 2182 try:
2184 2183 fp = remote.stream_out()
2185 2184 l = fp.readline()
2186 2185 try:
2187 2186 resp = int(l)
2188 2187 except ValueError:
2189 2188 raise error.ResponseError(
2190 2189 _('Unexpected response from remote server:'), l)
2191 2190 if resp == 1:
2192 2191 raise util.Abort(_('operation forbidden by server'))
2193 2192 elif resp == 2:
2194 2193 raise util.Abort(_('locking the remote repository failed'))
2195 2194 elif resp != 0:
2196 2195 raise util.Abort(_('the server sent an unknown error code'))
2197 2196 self.ui.status(_('streaming all changes\n'))
2198 2197 l = fp.readline()
2199 2198 try:
2200 2199 total_files, total_bytes = map(int, l.split(' ', 1))
2201 2200 except (ValueError, TypeError):
2202 2201 raise error.ResponseError(
2203 2202 _('Unexpected response from remote server:'), l)
2204 2203 self.ui.status(_('%d files to transfer, %s of data\n') %
2205 2204 (total_files, util.bytecount(total_bytes)))
2206 2205 start = time.time()
2207 2206 for i in xrange(total_files):
2208 2207 # XXX doesn't support '\n' or '\r' in filenames
2209 2208 l = fp.readline()
2210 2209 try:
2211 2210 name, size = l.split('\0', 1)
2212 2211 size = int(size)
2213 2212 except (ValueError, TypeError):
2214 2213 raise error.ResponseError(
2215 2214 _('Unexpected response from remote server:'), l)
2216 2215 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2217 2216 # for backwards compat, name was partially encoded
2218 2217 ofp = self.sopener(store.decodedir(name), 'w')
2219 2218 for chunk in util.filechunkiter(fp, limit=size):
2220 2219 ofp.write(chunk)
2221 2220 ofp.close()
2222 2221 elapsed = time.time() - start
2223 2222 if elapsed <= 0:
2224 2223 elapsed = 0.001
2225 2224 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2226 2225 (util.bytecount(total_bytes), elapsed,
2227 2226 util.bytecount(total_bytes / elapsed)))
2228 2227
2229 2228 # new requirements = old non-format requirements + new format-related
2230 2229 # requirements from the streamed-in repository
2231 2230 requirements.update(set(self.requirements) - self.supportedformats)
2232 2231 self._applyrequirements(requirements)
2233 2232 self._writerequirements()
2234 2233
2235 2234 self.invalidate()
2236 2235 return len(self.heads()) + 1
2237 2236 finally:
2238 2237 lock.release()
2239 2238
2240 2239 def clone(self, remote, heads=[], stream=False):
2241 2240 '''clone remote repository.
2242 2241
2243 2242 keyword arguments:
2244 2243 heads: list of revs to clone (forces use of pull)
2245 2244 stream: use streaming clone if possible'''
2246 2245
2247 2246 # now, all clients that can request uncompressed clones can
2248 2247 # read repo formats supported by all servers that can serve
2249 2248 # them.
2250 2249
2251 2250 # if revlog format changes, client will have to check version
2252 2251 # and format flags on "stream" capability, and use
2253 2252 # uncompressed only if compatible.
2254 2253
2255 2254 if stream and not heads:
2256 2255 # 'stream' means remote revlog format is revlogv1 only
2257 2256 if remote.capable('stream'):
2258 2257 return self.stream_in(remote, set(('revlogv1',)))
2259 2258 # otherwise, 'streamreqs' contains the remote revlog format
2260 2259 streamreqs = remote.capable('streamreqs')
2261 2260 if streamreqs:
2262 2261 streamreqs = set(streamreqs.split(','))
2263 2262 # if we support it, stream in and adjust our requirements
2264 2263 if not streamreqs - self.supportedformats:
2265 2264 return self.stream_in(remote, streamreqs)
2266 2265 return self.pull(remote, heads)
2267 2266
2268 2267 def pushkey(self, namespace, key, old, new):
2269 2268 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2270 2269 old=old, new=new)
2271 2270 ret = pushkey.push(self, namespace, key, old, new)
2272 2271 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2273 2272 ret=ret)
2274 2273 return ret
2275 2274
2276 2275 def listkeys(self, namespace):
2277 2276 self.hook('prelistkeys', throw=True, namespace=namespace)
2278 2277 values = pushkey.list(self, namespace)
2279 2278 self.hook('listkeys', namespace=namespace, values=values)
2280 2279 return values
2281 2280
2282 2281 def debugwireargs(self, one, two, three=None, four=None, five=None):
2283 2282 '''used to test argument passing over the wire'''
2284 2283 return "%s %s %s %s %s" % (one, two, three, four, five)
2285 2284
2286 2285 def savecommitmessage(self, text):
2287 2286 fp = self.opener('last-message.txt', 'wb')
2288 2287 try:
2289 2288 fp.write(text)
2290 2289 finally:
2291 2290 fp.close()
2292 2291 return self.pathto(fp.name[len(self.root)+1:])
2293 2292
2294 2293 # used to avoid circular references so destructors work
2295 2294 def aftertrans(files):
2296 2295 renamefiles = [tuple(t) for t in files]
2297 2296 def a():
2298 2297 for src, dest in renamefiles:
2299 2298 util.rename(src, dest)
2300 2299 return a
2301 2300
2302 2301 def undoname(fn):
2303 2302 base, name = os.path.split(fn)
2304 2303 assert name.startswith('journal')
2305 2304 return os.path.join(base, name.replace('journal', 'undo', 1))
2306 2305
2307 2306 def instance(ui, path, create):
2308 2307 return localrepository(ui, util.urllocalpath(path), create)
2309 2308
2310 2309 def islocal(path):
2311 2310 return True
@@ -1,300 +1,317 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms of the
10 10 GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phases' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase is described below,
22 22 here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not permanent and
25 25 leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered, so they
28 28 can be considered from lowest to highest. The default, lowest phase is 'public'
29 29 - this is the normal phase of existing changesets. A child changeset can not be
30 30 in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 local commits are draft by default
40 40
41 41 Phase movement and exchange
42 42 ============================
43 43
44 44 Phase data are exchanged by pushkey on pull and push. Some server have a
45 45 publish option set, we call them publishing server. Pushing to such server make
46 46 draft changeset publish.
47 47
48 48 A small list of fact/rules define the exchange of phase:
49 49
50 50 * old client never changes server states
51 51 * pull never changes server states
52 52 * publish and old server csets are seen as public by client
53 53
54 54 * Any secret changeset seens in another repository is lowered to at least draft
55 55
56 56
57 57 Here is the final table summing up the 49 possible usecase of phase exchange:
58 58
59 59 server
60 60 old publish non-publish
61 61 N X N D P N D P
62 62 old client
63 63 pull
64 64 N - X/X - X/D X/P - X/D X/P
65 65 X - X/X - X/D X/P - X/D X/P
66 66 push
67 67 X X/X X/X X/P X/P X/P X/D X/D X/P
68 68 new client
69 69 pull
70 70 N - P/X - P/D P/P - D/D P/P
71 71 D - P/X - P/D P/P - D/D P/P
72 72 P - P/X - P/D P/P - P/D P/P
73 73 push
74 74 D P/X P/X P/P P/P P/P D/D D/D P/P
75 75 P P/X P/X P/P P/P P/P P/P P/P P/P
76 76
77 77 Legend:
78 78
79 79 A/B = final state on client / state on server
80 80
81 81 * N = new/not present,
82 82 * P = public,
83 83 * D = draft,
84 84 * X = not tracked (ie: the old client or server has no internal way of
85 85 recording the phase.)
86 86
87 87 passive = only pushes
88 88
89 89
90 90 A cell here can be read like this:
91 91
92 92 "When a new client pushes a draft changeset (D) to a publishing server
93 93 where it's not present (N), it's marked public on both sides (P/P)."
94 94
95 95 Note: old client behave as publish server with Draft only content
96 96 - other people see it as public
97 97 - content is pushed as draft
98 98
99 99 """
100 100
101 101 import errno
102 102 from node import nullid, bin, hex, short
103 103 from i18n import _
104 104
105 105 allphases = public, draft, secret = range(3)
106 106 trackedphases = allphases[1:]
107 107 phasenames = ['public', 'draft', 'secret']
108 108
109 109 def readroots(repo):
110 110 """Read phase roots from disk"""
111 111 roots = [set() for i in allphases]
112 112 try:
113 113 f = repo.sopener('phaseroots')
114 114 try:
115 115 for line in f:
116 116 phase, nh = line.strip().split()
117 117 roots[int(phase)].add(bin(nh))
118 118 finally:
119 119 f.close()
120 120 except IOError, inst:
121 121 if inst.errno != errno.ENOENT:
122 122 raise
123 123 for f in repo._phasedefaults:
124 124 roots = f(repo, roots)
125 125 repo._dirtyphases = True
126 126 return roots
127 127
128 128 def writeroots(repo):
129 129 """Write phase roots from disk"""
130 130 f = repo.sopener('phaseroots', 'w', atomictemp=True)
131 131 try:
132 132 for phase, roots in enumerate(repo._phaseroots):
133 133 for h in roots:
134 134 f.write('%i %s\n' % (phase, hex(h)))
135 135 repo._dirtyphases = False
136 136 finally:
137 137 f.close()
138 138
139 139 def filterunknown(repo, phaseroots=None):
140 140 """remove unknown nodes from the phase boundary
141 141
142 142 no data is lost as unknown node only old data for their descentants
143 143 """
144 144 if phaseroots is None:
145 145 phaseroots = repo._phaseroots
146 146 nodemap = repo.changelog.nodemap # to filter unknown nodes
147 147 for phase, nodes in enumerate(phaseroots):
148 148 missing = [node for node in nodes if node not in nodemap]
149 149 if missing:
150 150 for mnode in missing:
151 151 msg = 'Removing unknown node %(n)s from %(p)i-phase boundary'
152 152 repo.ui.debug(msg, {'n': short(mnode), 'p': phase})
153 153 nodes.symmetric_difference_update(missing)
154 154 repo._dirtyphases = True
155 155
156 156 def advanceboundary(repo, targetphase, nodes):
157 157 """Add nodes to a phase changing other nodes phases if necessary.
158 158
159 159 This function move boundary *forward* this means that all nodes are set
160 160 in the target phase or kept in a *lower* phase.
161 161
162 162 Simplify boundary to contains phase roots only."""
163 163 delroots = [] # set of root deleted by this path
164 164 for phase in xrange(targetphase + 1, len(allphases)):
165 165 # filter nodes that are not in a compatible phase already
166 166 # XXX rev phase cache might have been invalidated by a previous loop
167 167 # XXX we need to be smarter here
168 168 nodes = [n for n in nodes if repo[n].phase() >= phase]
169 169 if not nodes:
170 170 break # no roots to move anymore
171 171 roots = repo._phaseroots[phase]
172 172 olds = roots.copy()
173 173 ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
174 174 roots.clear()
175 175 roots.update(ctx.node() for ctx in ctxs)
176 176 if olds != roots:
177 177 # invalidate cache (we probably could be smarter here
178 178 if '_phaserev' in vars(repo):
179 179 del repo._phaserev
180 180 repo._dirtyphases = True
181 181 # some roots may need to be declared for lower phases
182 182 delroots.extend(olds - roots)
183 183 # declare deleted root in the target phase
184 184 if targetphase != 0:
185 185 retractboundary(repo, targetphase, delroots)
186 186
187 187
188 188 def retractboundary(repo, targetphase, nodes):
189 189 """Set nodes back to a phase changing other nodes phases if necessary.
190 190
191 191 This function move boundary *backward* this means that all nodes are set
192 192 in the target phase or kept in a *higher* phase.
193 193
194 194 Simplify boundary to contains phase roots only."""
195 195 currentroots = repo._phaseroots[targetphase]
196 196 newroots = [n for n in nodes if repo[n].phase() < targetphase]
197 197 if newroots:
198 198 currentroots.update(newroots)
199 199 ctxs = repo.set('roots(%ln::)', currentroots)
200 200 currentroots.intersection_update(ctx.node() for ctx in ctxs)
201 201 if '_phaserev' in vars(repo):
202 202 del repo._phaserev
203 203 repo._dirtyphases = True
204 204
205 205
206 206 def listphases(repo):
207 207 """List phases root for serialisation over pushkey"""
208 208 keys = {}
209 209 value = '%i' % draft
210 210 for root in repo._phaseroots[draft]:
211 211 keys[hex(root)] = value
212 212
213 213 if repo.ui.configbool('phases', 'publish', True):
214 214 # Add an extra data to let remote know we are a publishing repo.
215 215 # Publishing repo can't just pretend they are old repo. When pushing to
216 216 # a publishing repo, the client still need to push phase boundary
217 217 #
218 218 # Push do not only push changeset. It also push phase data. New
219 219 # phase data may apply to common changeset which won't be push (as they
220 220 # are common). Here is a very simple example:
221 221 #
222 222 # 1) repo A push changeset X as draft to repo B
223 223 # 2) repo B make changeset X public
224 224 # 3) repo B push to repo A. X is not pushed but the data that X as now
225 225 # public should
226 226 #
227 227 # The server can't handle it on it's own as it has no idea of client
228 228 # phase data.
229 229 keys['publishing'] = 'True'
230 230 return keys
231 231
232 232 def pushphase(repo, nhex, oldphasestr, newphasestr):
233 233 """List phases root for serialisation over pushkey"""
234 234 lock = repo.lock()
235 235 try:
236 236 currentphase = repo[nhex].phase()
237 237 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
238 238 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
239 239 if currentphase == oldphase and newphase < oldphase:
240 240 advanceboundary(repo, newphase, [bin(nhex)])
241 241 return 1
242 242 else:
243 243 return 0
244 244 finally:
245 245 lock.release()
246 246
247 247 def visibleheads(repo):
248 248 """return the set of visible head of this repo"""
249 249 # XXX we want a cache on this
250 250 sroots = repo._phaseroots[secret]
251 251 if sroots:
252 252 # XXX very slow revset. storing heads or secret "boundary" would help.
253 253 revset = repo.set('heads(not (%ln::))', sroots)
254 254
255 255 vheads = [ctx.node() for ctx in revset]
256 256 if not vheads:
257 257 vheads.append(nullid)
258 258 else:
259 259 vheads = repo.heads()
260 260 return vheads
261 261
262 262 def analyzeremotephases(repo, subset, roots):
263 263 """Compute phases heads and root in a subset of node from root dict
264 264
265 265 * subset is heads of the subset
266 266 * roots is {<nodeid> => phase} mapping. key and value are string.
267 267
268 268 Accept unknown element input
269 269 """
270 270 # build list from dictionary
271 271 draftroots = []
272 272 nodemap = repo.changelog.nodemap # to filter unknown nodes
273 273 for nhex, phase in roots.iteritems():
274 274 if nhex == 'publishing': # ignore data related to publish option
275 275 continue
276 276 node = bin(nhex)
277 277 phase = int(phase)
278 278 if phase == 0:
279 279 if node != nullid:
280 280 repo.ui.warn(_('ignoring inconsistent public root'
281 281 ' from remote: %s\n') % nhex)
282 282 elif phase == 1:
283 283 if node in nodemap:
284 284 draftroots.append(node)
285 285 else:
286 286 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
287 287 % (phase, nhex))
288 288 # compute heads
289 289 publicheads = newheads(repo, subset, draftroots)
290 290 return publicheads, draftroots
291 291
292 292 def newheads(repo, heads, roots):
293 293 """compute new head of a subset minus another
294 294
295 295 * `heads`: define the first subset
296 296 * `rroots`: define the second we substract to the first"""
297 297 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
298 298 heads, roots, roots, heads)
299 299 return [c.node() for c in revset]
300 300
301
302 def newcommitphase(ui):
303 """helper to get the target phase of new commit
304
305 Handle all possible values for the phases.new-commit options.
306
307 """
308 v = ui.config('phases', 'new-commit', draft)
309 try:
310 return phasenames.index(v)
311 except ValueError:
312 try:
313 return int(v)
314 except ValueError:
315 msg = _("phases.new-commit: not a valid phase name ('%s')")
316 raise error.ConfigError(msg % v)
317
@@ -1,404 +1,404 b''
1 1 $ hglog() { hg log --template "{rev} {phaseidx} {desc}\n" $*; }
2 2 $ mkcommit() {
3 3 > echo "$1" > "$1"
4 4 > hg add "$1"
5 5 > message="$1"
6 6 > shift
7 7 > hg ci -m "$message" $*
8 8 > }
9 9
10 10 $ hg init initialrepo
11 11 $ cd initialrepo
12 12 $ mkcommit A
13 13
14 14 New commit are draft by default
15 15
16 16 $ hglog
17 17 0 1 A
18 18
19 19 Following commit are draft too
20 20
21 21 $ mkcommit B
22 22
23 23 $ hglog
24 24 1 1 B
25 25 0 1 A
26 26
27 27 Draft commit are properly created over public one:
28 28
29 29 $ hg phase --public .
30 30 $ hglog
31 31 1 0 B
32 32 0 0 A
33 33
34 34 $ mkcommit C
35 35 $ mkcommit D
36 36
37 37 $ hglog
38 38 3 1 D
39 39 2 1 C
40 40 1 0 B
41 41 0 0 A
42 42
43 43 Test creating changeset as secret
44 44
45 $ mkcommit E --config phases.new-commit=2
45 $ mkcommit E --config phases.new-commit='secret'
46 46 $ hglog
47 47 4 2 E
48 48 3 1 D
49 49 2 1 C
50 50 1 0 B
51 51 0 0 A
52 52
53 53 Test the secret property is inherited
54 54
55 55 $ mkcommit H
56 56 $ hglog
57 57 5 2 H
58 58 4 2 E
59 59 3 1 D
60 60 2 1 C
61 61 1 0 B
62 62 0 0 A
63 63
64 64 Even on merge
65 65
66 66 $ hg up -q 1
67 67 $ mkcommit "B'"
68 68 created new head
69 69 $ hglog
70 70 6 1 B'
71 71 5 2 H
72 72 4 2 E
73 73 3 1 D
74 74 2 1 C
75 75 1 0 B
76 76 0 0 A
77 77 $ hg merge 4 # E
78 78 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
79 79 (branch merge, don't forget to commit)
80 80 $ hg ci -m "merge B' and E"
81 81 $ hglog
82 82 7 2 merge B' and E
83 83 6 1 B'
84 84 5 2 H
85 85 4 2 E
86 86 3 1 D
87 87 2 1 C
88 88 1 0 B
89 89 0 0 A
90 90
 91  91   Test secret changesets are not pushed
92 92
93 93 $ hg init ../push-dest
94 94 $ cat > ../push-dest/.hg/hgrc << EOF
95 95 > [phases]
96 96 > publish=False
97 97 > EOF
98 98 $ hg outgoing ../push-dest --template='{rev} {phase} {desc|firstline}\n'
99 99 comparing with ../push-dest
100 100 searching for changes
101 101 0 public A
102 102 1 public B
103 103 2 draft C
104 104 3 draft D
105 105 6 draft B'
106 106 $ hg outgoing -r default ../push-dest --template='{rev} {phase} {desc|firstline}\n'
107 107 comparing with ../push-dest
108 108 searching for changes
109 109 0 public A
110 110 1 public B
111 111 2 draft C
112 112 3 draft D
113 113 6 draft B'
114 114
115 115 $ hg push ../push-dest -f # force because we push multiple heads
116 116 pushing to ../push-dest
117 117 searching for changes
118 118 adding changesets
119 119 adding manifests
120 120 adding file changes
121 121 added 5 changesets with 5 changes to 5 files (+1 heads)
122 122 $ hglog
123 123 7 2 merge B' and E
124 124 6 1 B'
125 125 5 2 H
126 126 4 2 E
127 127 3 1 D
128 128 2 1 C
129 129 1 0 B
130 130 0 0 A
131 131 $ cd ../push-dest
132 132 $ hglog
133 133 4 1 B'
134 134 3 1 D
135 135 2 1 C
136 136 1 0 B
137 137 0 0 A
138 138 $ cd ..
139 139
 140 140  Test secret changesets are not pulled
141 141
142 142 $ hg init pull-dest
143 143 $ cd pull-dest
144 144 $ hg pull ../initialrepo
145 145 pulling from ../initialrepo
146 146 requesting all changes
147 147 adding changesets
148 148 adding manifests
149 149 adding file changes
150 150 added 5 changesets with 5 changes to 5 files (+1 heads)
151 151 (run 'hg heads' to see heads, 'hg merge' to merge)
152 152 $ hglog
153 153 4 0 B'
154 154 3 0 D
155 155 2 0 C
156 156 1 0 B
157 157 0 0 A
158 158 $ cd ..
159 159
160 160 But secret can still be bundled explicitly
161 161
162 162 $ cd initialrepo
163 163 $ hg bundle --base '4^' -r 'children(4)' ../secret-bundle.hg
164 164 4 changesets found
165 165 $ cd ..
166 166
167 167 Test secret changeset are not cloned
168 168 (during local clone)
169 169
170 170 $ hg clone -qU initialrepo clone-dest
171 171 $ hglog -R clone-dest
172 172 4 0 B'
173 173 3 0 D
174 174 2 0 C
175 175 1 0 B
176 176 0 0 A
177 177
178 178 Test revset
179 179
180 180 $ cd initialrepo
181 181 $ hglog -r 'public()'
182 182 0 0 A
183 183 1 0 B
184 184 $ hglog -r 'draft()'
185 185 2 1 C
186 186 3 1 D
187 187 6 1 B'
188 188 $ hglog -r 'secret()'
189 189 4 2 E
190 190 5 2 H
191 191 7 2 merge B' and E
192 192
193 193 test that phase are displayed in log at debug level
194 194
195 195 $ hg log --debug
196 196 changeset: 7:17a481b3bccb796c0521ae97903d81c52bfee4af
197 197 tag: tip
198 198 phase: secret
199 199 parent: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
200 200 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
201 201 manifest: 7:5e724ffacba267b2ab726c91fc8b650710deaaa8
202 202 user: test
203 203 date: Thu Jan 01 00:00:00 1970 +0000
204 204 files+: C D E
205 205 extra: branch=default
206 206 description:
207 207 merge B' and E
208 208
209 209
210 210 changeset: 6:cf9fe039dfd67e829edf6522a45de057b5c86519
211 211 phase: draft
212 212 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
213 213 parent: -1:0000000000000000000000000000000000000000
214 214 manifest: 6:ab8bfef2392903058bf4ebb9e7746e8d7026b27a
215 215 user: test
216 216 date: Thu Jan 01 00:00:00 1970 +0000
217 217 files+: B'
218 218 extra: branch=default
219 219 description:
220 220 B'
221 221
222 222
223 223 changeset: 5:a030c6be5127abc010fcbff1851536552e6951a8
224 224 phase: secret
225 225 parent: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
226 226 parent: -1:0000000000000000000000000000000000000000
227 227 manifest: 5:5c710aa854874fe3d5fa7192e77bdb314cc08b5a
228 228 user: test
229 229 date: Thu Jan 01 00:00:00 1970 +0000
230 230 files+: H
231 231 extra: branch=default
232 232 description:
233 233 H
234 234
235 235
236 236 changeset: 4:a603bfb5a83e312131cebcd05353c217d4d21dde
237 237 phase: secret
238 238 parent: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
239 239 parent: -1:0000000000000000000000000000000000000000
240 240 manifest: 4:7173fd1c27119750b959e3a0f47ed78abe75d6dc
241 241 user: test
242 242 date: Thu Jan 01 00:00:00 1970 +0000
243 243 files+: E
244 244 extra: branch=default
245 245 description:
246 246 E
247 247
248 248
249 249 changeset: 3:b3325c91a4d916bcc4cdc83ea3fe4ece46a42f6e
250 250 phase: draft
251 251 parent: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
252 252 parent: -1:0000000000000000000000000000000000000000
253 253 manifest: 3:6e1f4c47ecb533ffd0c8e52cdc88afb6cd39e20c
254 254 user: test
255 255 date: Thu Jan 01 00:00:00 1970 +0000
256 256 files+: D
257 257 extra: branch=default
258 258 description:
259 259 D
260 260
261 261
262 262 changeset: 2:f838bfaca5c7226600ebcfd84f3c3c13a28d3757
263 263 phase: draft
264 264 parent: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
265 265 parent: -1:0000000000000000000000000000000000000000
266 266 manifest: 2:66a5a01817fdf5239c273802b5b7618d051c89e4
267 267 user: test
268 268 date: Thu Jan 01 00:00:00 1970 +0000
269 269 files+: C
270 270 extra: branch=default
271 271 description:
272 272 C
273 273
274 274
275 275 changeset: 1:27547f69f25460a52fff66ad004e58da7ad3fb56
276 276 parent: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
277 277 parent: -1:0000000000000000000000000000000000000000
278 278 manifest: 1:cb5cbbc1bfbf24cc34b9e8c16914e9caa2d2a7fd
279 279 user: test
280 280 date: Thu Jan 01 00:00:00 1970 +0000
281 281 files+: B
282 282 extra: branch=default
283 283 description:
284 284 B
285 285
286 286
287 287 changeset: 0:4a2df7238c3b48766b5e22fafbb8a2f506ec8256
288 288 parent: -1:0000000000000000000000000000000000000000
289 289 parent: -1:0000000000000000000000000000000000000000
290 290 manifest: 0:007d8c9d88841325f5c6b06371b35b4e8a2b1a83
291 291 user: test
292 292 date: Thu Jan 01 00:00:00 1970 +0000
293 293 files+: A
294 294 extra: branch=default
295 295 description:
296 296 A
297 297
298 298
299 299
300 300 Test phase command
301 301 ===================
302 302
303 303 initial picture
304 304
305 305 $ cat >> $HGRCPATH << EOF
306 306 > [extensions]
307 307 > hgext.graphlog=
308 308 > EOF
309 309 $ hg log -G --template "{rev} {phase} {desc}\n"
310 310 @ 7 secret merge B' and E
311 311 |\
312 312 | o 6 draft B'
313 313 | |
314 314 +---o 5 secret H
315 315 | |
316 316 o | 4 secret E
317 317 | |
318 318 o | 3 draft D
319 319 | |
320 320 o | 2 draft C
321 321 |/
322 322 o 1 public B
323 323 |
324 324 o 0 public A
325 325
326 326
327 327 display changesets phase
328 328
329 329 (mixing -r and plain rev specification)
330 330
331 331 $ hg phase 1::4 -r 7
332 332 1: public
333 333 2: draft
334 334 3: draft
335 335 4: secret
336 336 7: secret
337 337
338 338
339 339 move changeset forward
340 340
341 341 (with -r option)
342 342
343 343 $ hg phase --public -r 2
344 344 $ hg log -G --template "{rev} {phase} {desc}\n"
345 345 @ 7 secret merge B' and E
346 346 |\
347 347 | o 6 draft B'
348 348 | |
349 349 +---o 5 secret H
350 350 | |
351 351 o | 4 secret E
352 352 | |
353 353 o | 3 draft D
354 354 | |
355 355 o | 2 public C
356 356 |/
357 357 o 1 public B
358 358 |
359 359 o 0 public A
360 360
361 361
362 362 move changeset backward
363 363
364 364 (without -r option)
365 365
366 366 $ hg phase --draft --force 2
367 367 $ hg log -G --template "{rev} {phase} {desc}\n"
368 368 @ 7 secret merge B' and E
369 369 |\
370 370 | o 6 draft B'
371 371 | |
372 372 +---o 5 secret H
373 373 | |
374 374 o | 4 secret E
375 375 | |
376 376 o | 3 draft D
377 377 | |
378 378 o | 2 draft C
379 379 |/
380 380 o 1 public B
381 381 |
382 382 o 0 public A
383 383
384 384
385 385 move changeset forward and backward
386 386
387 387 $ hg phase --draft --force 1::4
388 388 $ hg log -G --template "{rev} {phase} {desc}\n"
389 389 @ 7 secret merge B' and E
390 390 |\
391 391 | o 6 draft B'
392 392 | |
393 393 +---o 5 secret H
394 394 | |
395 395 o | 4 draft E
396 396 | |
397 397 o | 3 draft D
398 398 | |
399 399 o | 2 draft C
400 400 |/
401 401 o 1 draft B
402 402 |
403 403 o 0 public A
404 404
General Comments 0
You need to be logged in to leave comments. Login now