phases: simplify phase exchange and movement over pushkey...
Pierre-Yves David
r15892:592b3d17 default
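This changeset replaces the per-phase loop over the result of phases.analyzeremotephases() with two explicit boundary moves, one for the public phase and one for draft. A minimal before/after sketch of the pull-side change, using only names that appear in the diff below:

    # before: one advanceboundary() call per tracked phase
    rheads, rroots = phases.analyzeremotephases(self, subset, remotephases)
    for phase, boundary in enumerate(rheads):
        phases.advanceboundary(self, phase, boundary)

    # after: advance public to the remote public heads, then draft to
    # everything exchanged (common + added)
    pheads, _dr = phases.analyzeremotephases(self, subset, remotephases)
    phases.advanceboundary(self, phases.public, pheads)
    phases.advanceboundary(self, phases.draft, common + added)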
@@ -1,2288 +1,2248 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39 self._dirtyphases = False
40 40
41 41 try:
42 42 self.ui.readconfig(self.join("hgrc"), self.root)
43 43 extensions.loadall(self.ui)
44 44 except IOError:
45 45 pass
46 46
47 47 if not os.path.isdir(self.path):
48 48 if create:
49 49 if not os.path.exists(path):
50 50 util.makedirs(path)
51 51 util.makedir(self.path, notindexed=True)
52 52 requirements = ["revlogv1"]
53 53 if self.ui.configbool('format', 'usestore', True):
54 54 os.mkdir(os.path.join(self.path, "store"))
55 55 requirements.append("store")
56 56 if self.ui.configbool('format', 'usefncache', True):
57 57 requirements.append("fncache")
58 58 if self.ui.configbool('format', 'dotencode', True):
59 59 requirements.append('dotencode')
60 60 # create an invalid changelog
61 61 self.opener.append(
62 62 "00changelog.i",
63 63 '\0\0\0\2' # represents revlogv2
64 64 ' dummy changelog to prevent using the old repo layout'
65 65 )
66 66 if self.ui.configbool('format', 'generaldelta', False):
67 67 requirements.append("generaldelta")
68 68 requirements = set(requirements)
69 69 else:
70 70 raise error.RepoError(_("repository %s not found") % path)
71 71 elif create:
72 72 raise error.RepoError(_("repository %s already exists") % path)
73 73 else:
74 74 try:
75 75 requirements = scmutil.readrequires(self.opener, self.supported)
76 76 except IOError, inst:
77 77 if inst.errno != errno.ENOENT:
78 78 raise
79 79 requirements = set()
80 80
81 81 self.sharedpath = self.path
82 82 try:
83 83 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
84 84 if not os.path.exists(s):
85 85 raise error.RepoError(
86 86 _('.hg/sharedpath points to nonexistent directory %s') % s)
87 87 self.sharedpath = s
88 88 except IOError, inst:
89 89 if inst.errno != errno.ENOENT:
90 90 raise
91 91
92 92 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
93 93 self.spath = self.store.path
94 94 self.sopener = self.store.opener
95 95 self.sjoin = self.store.join
96 96 self.opener.createmode = self.store.createmode
97 97 self._applyrequirements(requirements)
98 98 if create:
99 99 self._writerequirements()
100 100
101 101
102 102 self._branchcache = None
103 103 self._branchcachetip = None
104 104 self.filterpats = {}
105 105 self._datafilters = {}
106 106 self._transref = self._lockref = self._wlockref = None
107 107
108 108 # A cache for various files under .hg/ that tracks file changes
109 109 # (used by the filecache decorator)
110 110 #
111 111 # Maps a property name to its util.filecacheentry
112 112 self._filecache = {}
113 113
114 114 def _applyrequirements(self, requirements):
115 115 self.requirements = requirements
116 116 openerreqs = set(('revlogv1', 'generaldelta'))
117 117 self.sopener.options = dict((r, 1) for r in requirements
118 118 if r in openerreqs)
119 119
120 120 def _writerequirements(self):
121 121 reqfile = self.opener("requires", "w")
122 122 for r in self.requirements:
123 123 reqfile.write("%s\n" % r)
124 124 reqfile.close()
125 125
126 126 def _checknested(self, path):
127 127 """Determine if path is a legal nested repository."""
128 128 if not path.startswith(self.root):
129 129 return False
130 130 subpath = path[len(self.root) + 1:]
131 131 normsubpath = util.pconvert(subpath)
132 132
133 133 # XXX: Checking against the current working copy is wrong in
134 134 # the sense that it can reject things like
135 135 #
136 136 # $ hg cat -r 10 sub/x.txt
137 137 #
138 138 # if sub/ is no longer a subrepository in the working copy
139 139 # parent revision.
140 140 #
141 141 # However, it can of course also allow things that would have
142 142 # been rejected before, such as the above cat command if sub/
143 143 # is a subrepository now, but was a normal directory before.
144 144 # The old path auditor would have rejected by mistake since it
145 145 # panics when it sees sub/.hg/.
146 146 #
147 147 # All in all, checking against the working copy seems sensible
148 148 # since we want to prevent access to nested repositories on
149 149 # the filesystem *now*.
150 150 ctx = self[None]
151 151 parts = util.splitpath(subpath)
152 152 while parts:
153 153 prefix = '/'.join(parts)
154 154 if prefix in ctx.substate:
155 155 if prefix == normsubpath:
156 156 return True
157 157 else:
158 158 sub = ctx.sub(prefix)
159 159 return sub.checknested(subpath[len(prefix) + 1:])
160 160 else:
161 161 parts.pop()
162 162 return False
163 163
164 164 @filecache('bookmarks')
165 165 def _bookmarks(self):
166 166 return bookmarks.read(self)
167 167
168 168 @filecache('bookmarks.current')
169 169 def _bookmarkcurrent(self):
170 170 return bookmarks.readcurrent(self)
171 171
172 172 def _writebookmarks(self, marks):
173 173 bookmarks.write(self)
174 174
175 175 @filecache('phaseroots')
176 176 def _phaseroots(self):
177 177 self._dirtyphases = False
178 178 phaseroots = phases.readroots(self)
179 179 phases.filterunknown(self, phaseroots)
180 180 return phaseroots
181 181
182 182 @propertycache
183 183 def _phaserev(self):
184 184 cache = [phases.public] * len(self)
185 185 for phase in phases.trackedphases:
186 186 roots = map(self.changelog.rev, self._phaseroots[phase])
187 187 if roots:
188 188 for rev in roots:
189 189 cache[rev] = phase
190 190 for rev in self.changelog.descendants(*roots):
191 191 cache[rev] = phase
192 192 return cache
193 193
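# Illustration (not part of this file): _phaserev caches one phase constant
# per revision, so phase checks are plain list lookups; known() below does
# exactly this:
#
#   r = self.changelog.nodemap.get(node)
#   if self._phaserev[r] >= phases.secret:
#       ...  # secret changesets are hidden from remote peers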
194 194 @filecache('00changelog.i', True)
195 195 def changelog(self):
196 196 c = changelog.changelog(self.sopener)
197 197 if 'HG_PENDING' in os.environ:
198 198 p = os.environ['HG_PENDING']
199 199 if p.startswith(self.root):
200 200 c.readpending('00changelog.i.a')
201 201 return c
202 202
203 203 @filecache('00manifest.i', True)
204 204 def manifest(self):
205 205 return manifest.manifest(self.sopener)
206 206
207 207 @filecache('dirstate')
208 208 def dirstate(self):
209 209 warned = [0]
210 210 def validate(node):
211 211 try:
212 212 self.changelog.rev(node)
213 213 return node
214 214 except error.LookupError:
215 215 if not warned[0]:
216 216 warned[0] = True
217 217 self.ui.warn(_("warning: ignoring unknown"
218 218 " working parent %s!\n") % short(node))
219 219 return nullid
220 220
221 221 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
222 222
223 223 def __getitem__(self, changeid):
224 224 if changeid is None:
225 225 return context.workingctx(self)
226 226 return context.changectx(self, changeid)
227 227
228 228 def __contains__(self, changeid):
229 229 try:
230 230 return bool(self.lookup(changeid))
231 231 except error.RepoLookupError:
232 232 return False
233 233
234 234 def __nonzero__(self):
235 235 return True
236 236
237 237 def __len__(self):
238 238 return len(self.changelog)
239 239
240 240 def __iter__(self):
241 241 for i in xrange(len(self)):
242 242 yield i
243 243
244 244 def revs(self, expr, *args):
245 245 '''Return a list of revisions matching the given revset'''
246 246 expr = revset.formatspec(expr, *args)
247 247 m = revset.match(None, expr)
248 248 return [r for r in m(self, range(len(self)))]
249 249
250 250 def set(self, expr, *args):
251 251 '''
252 252 Yield a context for each matching revision, after doing arg
253 253 replacement via revset.formatspec
254 254 '''
255 255 for r in self.revs(expr, *args):
256 256 yield self[r]
257 257
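# Illustration (not part of this file): revs()/set() accept formatspec
# placeholders, e.g. %ln for a list of nodes and %ld for a list of revision
# numbers, as used further down in push():
#
#   for ctx in repo.set('%ln::%ln', roots, heads):
#       print ctx.hex()
#   drafts = repo.revs('draft()')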
258 258 def url(self):
259 259 return 'file:' + self.root
260 260
261 261 def hook(self, name, throw=False, **args):
262 262 return hook.hook(self.ui, self, name, throw, **args)
263 263
264 264 tag_disallowed = ':\r\n'
265 265
266 266 def _tag(self, names, node, message, local, user, date, extra={}):
267 267 if isinstance(names, str):
268 268 allchars = names
269 269 names = (names,)
270 270 else:
271 271 allchars = ''.join(names)
272 272 for c in self.tag_disallowed:
273 273 if c in allchars:
274 274 raise util.Abort(_('%r cannot be used in a tag name') % c)
275 275
276 276 branches = self.branchmap()
277 277 for name in names:
278 278 self.hook('pretag', throw=True, node=hex(node), tag=name,
279 279 local=local)
280 280 if name in branches:
281 281 self.ui.warn(_("warning: tag %s conflicts with existing"
282 282 " branch name\n") % name)
283 283
284 284 def writetags(fp, names, munge, prevtags):
285 285 fp.seek(0, 2)
286 286 if prevtags and prevtags[-1] != '\n':
287 287 fp.write('\n')
288 288 for name in names:
289 289 m = munge and munge(name) or name
290 290 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
291 291 old = self.tags().get(name, nullid)
292 292 fp.write('%s %s\n' % (hex(old), m))
293 293 fp.write('%s %s\n' % (hex(node), m))
294 294 fp.close()
295 295
296 296 prevtags = ''
297 297 if local:
298 298 try:
299 299 fp = self.opener('localtags', 'r+')
300 300 except IOError:
301 301 fp = self.opener('localtags', 'a')
302 302 else:
303 303 prevtags = fp.read()
304 304
305 305 # local tags are stored in the current charset
306 306 writetags(fp, names, None, prevtags)
307 307 for name in names:
308 308 self.hook('tag', node=hex(node), tag=name, local=local)
309 309 return
310 310
311 311 try:
312 312 fp = self.wfile('.hgtags', 'rb+')
313 313 except IOError, e:
314 314 if e.errno != errno.ENOENT:
315 315 raise
316 316 fp = self.wfile('.hgtags', 'ab')
317 317 else:
318 318 prevtags = fp.read()
319 319
320 320 # committed tags are stored in UTF-8
321 321 writetags(fp, names, encoding.fromlocal, prevtags)
322 322
323 323 fp.close()
324 324
325 325 if '.hgtags' not in self.dirstate:
326 326 self[None].add(['.hgtags'])
327 327
328 328 m = matchmod.exact(self.root, '', ['.hgtags'])
329 329 tagnode = self.commit(message, user, date, extra=extra, match=m)
330 330
331 331 for name in names:
332 332 self.hook('tag', node=hex(node), tag=name, local=local)
333 333
334 334 return tagnode
335 335
336 336 def tag(self, names, node, message, local, user, date):
337 337 '''tag a revision with one or more symbolic names.
338 338
339 339 names is a list of strings or, when adding a single tag, names may be a
340 340 string.
341 341
342 342 if local is True, the tags are stored in a per-repository file.
343 343 otherwise, they are stored in the .hgtags file, and a new
344 344 changeset is committed with the change.
345 345
346 346 keyword arguments:
347 347
348 348 local: whether to store tags in non-version-controlled file
349 349 (default False)
350 350
351 351 message: commit message to use if committing
352 352
353 353 user: name of user to use if committing
354 354
355 355 date: date tuple to use if committing'''
356 356
357 357 if not local:
358 358 for x in self.status()[:5]:
359 359 if '.hgtags' in x:
360 360 raise util.Abort(_('working copy of .hgtags is changed '
361 361 '(please commit .hgtags manually)'))
362 362
363 363 self.tags() # instantiate the cache
364 364 self._tag(names, node, message, local, user, date)
365 365
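# Illustration (assumed values, not part of this file): a typical call,
# with arguments in the order documented above:
#
#   repo.tag(['v1.2'], node, 'Added tag v1.2', False,
#            'user@example.com', None)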
366 366 @propertycache
367 367 def _tagscache(self):
368 368 '''Returns a tagscache object that contains various tag-related caches.'''
369 369
370 370 # This simplifies its cache management by having one decorated
371 371 # function (this one) and the rest simply fetch things from it.
372 372 class tagscache(object):
373 373 def __init__(self):
374 374 # These two define the set of tags for this repository. tags
375 375 # maps tag name to node; tagtypes maps tag name to 'global' or
376 376 # 'local'. (Global tags are defined by .hgtags across all
377 377 # heads, and local tags are defined in .hg/localtags.)
378 378 # They constitute the in-memory cache of tags.
379 379 self.tags = self.tagtypes = None
380 380
381 381 self.nodetagscache = self.tagslist = None
382 382
383 383 cache = tagscache()
384 384 cache.tags, cache.tagtypes = self._findtags()
385 385
386 386 return cache
387 387
388 388 def tags(self):
389 389 '''return a mapping of tag to node'''
390 390 return self._tagscache.tags
391 391
392 392 def _findtags(self):
393 393 '''Do the hard work of finding tags. Return a pair of dicts
394 394 (tags, tagtypes) where tags maps tag name to node, and tagtypes
395 395 maps tag name to a string like \'global\' or \'local\'.
396 396 Subclasses or extensions are free to add their own tags, but
397 397 should be aware that the returned dicts will be retained for the
398 398 duration of the localrepo object.'''
399 399
400 400 # XXX what tagtype should subclasses/extensions use? Currently
401 401 # mq and bookmarks add tags, but do not set the tagtype at all.
402 402 # Should each extension invent its own tag type? Should there
403 403 # be one tagtype for all such "virtual" tags? Or is the status
404 404 # quo fine?
405 405
406 406 alltags = {} # map tag name to (node, hist)
407 407 tagtypes = {}
408 408
409 409 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
410 410 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
411 411
412 412 # Build the return dicts. Have to re-encode tag names because
413 413 # the tags module always uses UTF-8 (in order not to lose info
414 414 # writing to the cache), but the rest of Mercurial wants them in
415 415 # local encoding.
416 416 tags = {}
417 417 for (name, (node, hist)) in alltags.iteritems():
418 418 if node != nullid:
419 419 try:
420 420 # ignore tags to unknown nodes
421 421 self.changelog.lookup(node)
422 422 tags[encoding.tolocal(name)] = node
423 423 except error.LookupError:
424 424 pass
425 425 tags['tip'] = self.changelog.tip()
426 426 tagtypes = dict([(encoding.tolocal(name), value)
427 427 for (name, value) in tagtypes.iteritems()])
428 428 return (tags, tagtypes)
429 429
430 430 def tagtype(self, tagname):
431 431 '''
432 432 return the type of the given tag. result can be:
433 433
434 434 'local' : a local tag
435 435 'global' : a global tag
436 436 None : tag does not exist
437 437 '''
438 438
439 439 return self._tagscache.tagtypes.get(tagname)
440 440
441 441 def tagslist(self):
442 442 '''return a list of tags ordered by revision'''
443 443 if not self._tagscache.tagslist:
444 444 l = []
445 445 for t, n in self.tags().iteritems():
446 446 r = self.changelog.rev(n)
447 447 l.append((r, t, n))
448 448 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
449 449
450 450 return self._tagscache.tagslist
451 451
452 452 def nodetags(self, node):
453 453 '''return the tags associated with a node'''
454 454 if not self._tagscache.nodetagscache:
455 455 nodetagscache = {}
456 456 for t, n in self.tags().iteritems():
457 457 nodetagscache.setdefault(n, []).append(t)
458 458 for tags in nodetagscache.itervalues():
459 459 tags.sort()
460 460 self._tagscache.nodetagscache = nodetagscache
461 461 return self._tagscache.nodetagscache.get(node, [])
462 462
463 463 def nodebookmarks(self, node):
464 464 marks = []
465 465 for bookmark, n in self._bookmarks.iteritems():
466 466 if n == node:
467 467 marks.append(bookmark)
468 468 return sorted(marks)
469 469
470 470 def _branchtags(self, partial, lrev):
471 471 # TODO: rename this function?
472 472 tiprev = len(self) - 1
473 473 if lrev != tiprev:
474 474 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
475 475 self._updatebranchcache(partial, ctxgen)
476 476 self._writebranchcache(partial, self.changelog.tip(), tiprev)
477 477
478 478 return partial
479 479
480 480 def updatebranchcache(self):
481 481 tip = self.changelog.tip()
482 482 if self._branchcache is not None and self._branchcachetip == tip:
483 483 return
484 484
485 485 oldtip = self._branchcachetip
486 486 self._branchcachetip = tip
487 487 if oldtip is None or oldtip not in self.changelog.nodemap:
488 488 partial, last, lrev = self._readbranchcache()
489 489 else:
490 490 lrev = self.changelog.rev(oldtip)
491 491 partial = self._branchcache
492 492
493 493 self._branchtags(partial, lrev)
494 494 # this private cache holds all heads (not just tips)
495 495 self._branchcache = partial
496 496
497 497 def branchmap(self):
498 498 '''returns a dictionary {branch: [branchheads]}'''
499 499 self.updatebranchcache()
500 500 return self._branchcache
501 501
502 502 def branchtags(self):
503 503 '''return a dict where branch names map to the tipmost head of
504 504 the branch; open heads come before closed'''
505 505 bt = {}
506 506 for bn, heads in self.branchmap().iteritems():
507 507 tip = heads[-1]
508 508 for h in reversed(heads):
509 509 if 'close' not in self.changelog.read(h)[5]:
510 510 tip = h
511 511 break
512 512 bt[bn] = tip
513 513 return bt
514 514
515 515 def _readbranchcache(self):
516 516 partial = {}
517 517 try:
518 518 f = self.opener("cache/branchheads")
519 519 lines = f.read().split('\n')
520 520 f.close()
521 521 except (IOError, OSError):
522 522 return {}, nullid, nullrev
523 523
524 524 try:
525 525 last, lrev = lines.pop(0).split(" ", 1)
526 526 last, lrev = bin(last), int(lrev)
527 527 if lrev >= len(self) or self[lrev].node() != last:
528 528 # invalidate the cache
529 529 raise ValueError('invalidating branch cache (tip differs)')
530 530 for l in lines:
531 531 if not l:
532 532 continue
533 533 node, label = l.split(" ", 1)
534 534 label = encoding.tolocal(label.strip())
535 535 partial.setdefault(label, []).append(bin(node))
536 536 except KeyboardInterrupt:
537 537 raise
538 538 except Exception, inst:
539 539 if self.ui.debugflag:
540 540 self.ui.warn(str(inst), '\n')
541 541 partial, last, lrev = {}, nullid, nullrev
542 542 return partial, last, lrev
543 543
544 544 def _writebranchcache(self, branches, tip, tiprev):
545 545 try:
546 546 f = self.opener("cache/branchheads", "w", atomictemp=True)
547 547 f.write("%s %s\n" % (hex(tip), tiprev))
548 548 for label, nodes in branches.iteritems():
549 549 for node in nodes:
550 550 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
551 551 f.close()
552 552 except (IOError, OSError):
553 553 pass
554 554
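# Illustration (not part of this file): the on-disk format written by
# _writebranchcache and parsed by _readbranchcache above is:
#
#   <tip hex> <tip rev>
#   <head hex> <branch label>
#   <head hex> <branch label>
#   ...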
555 555 def _updatebranchcache(self, partial, ctxgen):
556 556 # collect new branch entries
557 557 newbranches = {}
558 558 for c in ctxgen:
559 559 newbranches.setdefault(c.branch(), []).append(c.node())
560 560 # if older branchheads are reachable from new ones, they aren't
561 561 # really branchheads. Note checking parents is insufficient:
562 562 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
563 563 for branch, newnodes in newbranches.iteritems():
564 564 bheads = partial.setdefault(branch, [])
565 565 bheads.extend(newnodes)
566 566 if len(bheads) <= 1:
567 567 continue
568 568 bheads = sorted(bheads, key=lambda x: self[x].rev())
569 569 # starting from tip means fewer passes over reachable
570 570 while newnodes:
571 571 latest = newnodes.pop()
572 572 if latest not in bheads:
573 573 continue
574 574 minbhrev = self[bheads[0]].node()
575 575 reachable = self.changelog.reachable(latest, minbhrev)
576 576 reachable.remove(latest)
577 577 if reachable:
578 578 bheads = [b for b in bheads if b not in reachable]
579 579 partial[branch] = bheads
580 580
581 581 def lookup(self, key):
582 582 if isinstance(key, int):
583 583 return self.changelog.node(key)
584 584 elif key == '.':
585 585 return self.dirstate.p1()
586 586 elif key == 'null':
587 587 return nullid
588 588 elif key == 'tip':
589 589 return self.changelog.tip()
590 590 n = self.changelog._match(key)
591 591 if n:
592 592 return n
593 593 if key in self._bookmarks:
594 594 return self._bookmarks[key]
595 595 if key in self.tags():
596 596 return self.tags()[key]
597 597 if key in self.branchtags():
598 598 return self.branchtags()[key]
599 599 n = self.changelog._partialmatch(key)
600 600 if n:
601 601 return n
602 602
603 603 # can't find key, check if it might have come from damaged dirstate
604 604 if key in self.dirstate.parents():
605 605 raise error.Abort(_("working directory has unknown parent '%s'!")
606 606 % short(key))
607 607 try:
608 608 if len(key) == 20:
609 609 key = hex(key)
610 610 except TypeError:
611 611 pass
612 612 raise error.RepoLookupError(_("unknown revision '%s'") % key)
613 613
614 614 def lookupbranch(self, key, remote=None):
615 615 repo = remote or self
616 616 if key in repo.branchmap():
617 617 return key
618 618
619 619 repo = (remote and remote.local()) and remote or self
620 620 return repo[key].branch()
621 621
622 622 def known(self, nodes):
623 623 nm = self.changelog.nodemap
624 624 result = []
625 625 for n in nodes:
626 626 r = nm.get(n)
627 627 resp = not (r is None or self._phaserev[r] >= phases.secret)
628 628 result.append(resp)
629 629 return result
630 630
631 631 def local(self):
632 632 return self
633 633
634 634 def cancopy(self):
635 635 return (repo.repository.cancopy(self)
636 636 and not self._phaseroots[phases.secret])
637 637
638 638 def join(self, f):
639 639 return os.path.join(self.path, f)
640 640
641 641 def wjoin(self, f):
642 642 return os.path.join(self.root, f)
643 643
644 644 def file(self, f):
645 645 if f[0] == '/':
646 646 f = f[1:]
647 647 return filelog.filelog(self.sopener, f)
648 648
649 649 def changectx(self, changeid):
650 650 return self[changeid]
651 651
652 652 def parents(self, changeid=None):
653 653 '''get list of changectxs for parents of changeid'''
654 654 return self[changeid].parents()
655 655
656 656 def filectx(self, path, changeid=None, fileid=None):
657 657 """changeid can be a changeset revision, node, or tag.
658 658 fileid can be a file revision or node."""
659 659 return context.filectx(self, path, changeid, fileid)
660 660
661 661 def getcwd(self):
662 662 return self.dirstate.getcwd()
663 663
664 664 def pathto(self, f, cwd=None):
665 665 return self.dirstate.pathto(f, cwd)
666 666
667 667 def wfile(self, f, mode='r'):
668 668 return self.wopener(f, mode)
669 669
670 670 def _link(self, f):
671 671 return os.path.islink(self.wjoin(f))
672 672
673 673 def _loadfilter(self, filter):
674 674 if filter not in self.filterpats:
675 675 l = []
676 676 for pat, cmd in self.ui.configitems(filter):
677 677 if cmd == '!':
678 678 continue
679 679 mf = matchmod.match(self.root, '', [pat])
680 680 fn = None
681 681 params = cmd
682 682 for name, filterfn in self._datafilters.iteritems():
683 683 if cmd.startswith(name):
684 684 fn = filterfn
685 685 params = cmd[len(name):].lstrip()
686 686 break
687 687 if not fn:
688 688 fn = lambda s, c, **kwargs: util.filter(s, c)
689 689 # Wrap old filters not supporting keyword arguments
690 690 if not inspect.getargspec(fn)[2]:
691 691 oldfn = fn
692 692 fn = lambda s, c, **kwargs: oldfn(s, c)
693 693 l.append((mf, fn, params))
694 694 self.filterpats[filter] = l
695 695 return self.filterpats[filter]
696 696
697 697 def _filter(self, filterpats, filename, data):
698 698 for mf, fn, cmd in filterpats:
699 699 if mf(filename):
700 700 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
701 701 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
702 702 break
703 703
704 704 return data
705 705
706 706 @propertycache
707 707 def _encodefilterpats(self):
708 708 return self._loadfilter('encode')
709 709
710 710 @propertycache
711 711 def _decodefilterpats(self):
712 712 return self._loadfilter('decode')
713 713
714 714 def adddatafilter(self, name, filter):
715 715 self._datafilters[name] = filter
716 716
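# Illustration (assumed hgrc, not part of this file): filter sections map
# file patterns to commands; names registered via adddatafilter() (usually
# by extensions) are matched first, anything else runs through util.filter:
#
#   [encode]
#   **.gz = pipe: gunzip
#   [decode]
#   **.gz = pipe: gzip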
717 717 def wread(self, filename):
718 718 if self._link(filename):
719 719 data = os.readlink(self.wjoin(filename))
720 720 else:
721 721 data = self.wopener.read(filename)
722 722 return self._filter(self._encodefilterpats, filename, data)
723 723
724 724 def wwrite(self, filename, data, flags):
725 725 data = self._filter(self._decodefilterpats, filename, data)
726 726 if 'l' in flags:
727 727 self.wopener.symlink(data, filename)
728 728 else:
729 729 self.wopener.write(filename, data)
730 730 if 'x' in flags:
731 731 util.setflags(self.wjoin(filename), False, True)
732 732
733 733 def wwritedata(self, filename, data):
734 734 return self._filter(self._decodefilterpats, filename, data)
735 735
736 736 def transaction(self, desc):
737 737 tr = self._transref and self._transref() or None
738 738 if tr and tr.running():
739 739 return tr.nest()
740 740
741 741 # abort here if the journal already exists
742 742 if os.path.exists(self.sjoin("journal")):
743 743 raise error.RepoError(
744 744 _("abandoned transaction found - run hg recover"))
745 745
746 746 journalfiles = self._writejournal(desc)
747 747 renames = [(x, undoname(x)) for x in journalfiles]
748 748
749 749 tr = transaction.transaction(self.ui.warn, self.sopener,
750 750 self.sjoin("journal"),
751 751 aftertrans(renames),
752 752 self.store.createmode)
753 753 self._transref = weakref.ref(tr)
754 754 return tr
755 755
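# Illustration (not part of this file): the calling convention, as used by
# commitctx() below:
#
#   tr = self.transaction("commit")
#   try:
#       ...            # write store data through tr
#       tr.close()     # make the changes permanent
#   finally:
#       tr.release()   # rolls everything back if close() never ran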
756 756 def _writejournal(self, desc):
757 757 # save dirstate for rollback
758 758 try:
759 759 ds = self.opener.read("dirstate")
760 760 except IOError:
761 761 ds = ""
762 762 self.opener.write("journal.dirstate", ds)
763 763 self.opener.write("journal.branch",
764 764 encoding.fromlocal(self.dirstate.branch()))
765 765 self.opener.write("journal.desc",
766 766 "%d\n%s\n" % (len(self), desc))
767 767
768 768 bkname = self.join('bookmarks')
769 769 if os.path.exists(bkname):
770 770 util.copyfile(bkname, self.join('journal.bookmarks'))
771 771 else:
772 772 self.opener.write('journal.bookmarks', '')
773 773 phasesname = self.sjoin('phaseroots')
774 774 if os.path.exists(phasesname):
775 775 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
776 776 else:
777 777 self.sopener.write('journal.phaseroots', '')
778 778
779 779 return (self.sjoin('journal'), self.join('journal.dirstate'),
780 780 self.join('journal.branch'), self.join('journal.desc'),
781 781 self.join('journal.bookmarks'),
782 782 self.sjoin('journal.phaseroots'))
783 783
784 784 def recover(self):
785 785 lock = self.lock()
786 786 try:
787 787 if os.path.exists(self.sjoin("journal")):
788 788 self.ui.status(_("rolling back interrupted transaction\n"))
789 789 transaction.rollback(self.sopener, self.sjoin("journal"),
790 790 self.ui.warn)
791 791 self.invalidate()
792 792 return True
793 793 else:
794 794 self.ui.warn(_("no interrupted transaction available\n"))
795 795 return False
796 796 finally:
797 797 lock.release()
798 798
799 799 def rollback(self, dryrun=False, force=False):
800 800 wlock = lock = None
801 801 try:
802 802 wlock = self.wlock()
803 803 lock = self.lock()
804 804 if os.path.exists(self.sjoin("undo")):
805 805 return self._rollback(dryrun, force)
806 806 else:
807 807 self.ui.warn(_("no rollback information available\n"))
808 808 return 1
809 809 finally:
810 810 release(lock, wlock)
811 811
812 812 def _rollback(self, dryrun, force):
813 813 ui = self.ui
814 814 try:
815 815 args = self.opener.read('undo.desc').splitlines()
816 816 (oldlen, desc, detail) = (int(args[0]), args[1], None)
817 817 if len(args) >= 3:
818 818 detail = args[2]
819 819 oldtip = oldlen - 1
820 820
821 821 if detail and ui.verbose:
822 822 msg = (_('repository tip rolled back to revision %s'
823 823 ' (undo %s: %s)\n')
824 824 % (oldtip, desc, detail))
825 825 else:
826 826 msg = (_('repository tip rolled back to revision %s'
827 827 ' (undo %s)\n')
828 828 % (oldtip, desc))
829 829 except IOError:
830 830 msg = _('rolling back unknown transaction\n')
831 831 desc = None
832 832
833 833 if not force and self['.'] != self['tip'] and desc == 'commit':
834 834 raise util.Abort(
835 835 _('rollback of last commit while not checked out '
836 836 'may lose data'), hint=_('use -f to force'))
837 837
838 838 ui.status(msg)
839 839 if dryrun:
840 840 return 0
841 841
842 842 parents = self.dirstate.parents()
843 843 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
844 844 if os.path.exists(self.join('undo.bookmarks')):
845 845 util.rename(self.join('undo.bookmarks'),
846 846 self.join('bookmarks'))
847 847 if os.path.exists(self.sjoin('undo.phaseroots')):
848 848 util.rename(self.sjoin('undo.phaseroots'),
849 849 self.sjoin('phaseroots'))
850 850 self.invalidate()
851 851
852 852 parentgone = (parents[0] not in self.changelog.nodemap or
853 853 parents[1] not in self.changelog.nodemap)
854 854 if parentgone:
855 855 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
856 856 try:
857 857 branch = self.opener.read('undo.branch')
858 858 self.dirstate.setbranch(branch)
859 859 except IOError:
860 860 ui.warn(_('named branch could not be reset: '
861 861 'current branch is still \'%s\'\n')
862 862 % self.dirstate.branch())
863 863
864 864 self.dirstate.invalidate()
865 865 parents = tuple([p.rev() for p in self.parents()])
866 866 if len(parents) > 1:
867 867 ui.status(_('working directory now based on '
868 868 'revisions %d and %d\n') % parents)
869 869 else:
870 870 ui.status(_('working directory now based on '
871 871 'revision %d\n') % parents)
872 872 self.destroyed()
873 873 return 0
874 874
875 875 def invalidatecaches(self):
876 876 try:
877 877 delattr(self, '_tagscache')
878 878 except AttributeError:
879 879 pass
880 880
881 881 self._branchcache = None # in UTF-8
882 882 self._branchcachetip = None
883 883
884 884 def invalidatedirstate(self):
885 885 '''Invalidates the dirstate, causing the next call to dirstate
886 886 to check if it was modified since the last time it was read,
887 887 rereading it if it has been.
888 888
889 889 This differs from dirstate.invalidate() in that it doesn't always
890 890 reread the dirstate. Use dirstate.invalidate() if you want to
891 891 explicitly read the dirstate again (i.e. restoring it to a previous
892 892 known good state).'''
893 893 try:
894 894 delattr(self, 'dirstate')
895 895 except AttributeError:
896 896 pass
897 897
898 898 def invalidate(self):
899 899 for k in self._filecache:
900 900 # dirstate is invalidated separately in invalidatedirstate()
901 901 if k == 'dirstate':
902 902 continue
903 903
904 904 try:
905 905 delattr(self, k)
906 906 except AttributeError:
907 907 pass
908 908 self.invalidatecaches()
909 909
910 910 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
911 911 try:
912 912 l = lock.lock(lockname, 0, releasefn, desc=desc)
913 913 except error.LockHeld, inst:
914 914 if not wait:
915 915 raise
916 916 self.ui.warn(_("waiting for lock on %s held by %r\n") %
917 917 (desc, inst.locker))
918 918 # default to 600 seconds timeout
919 919 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
920 920 releasefn, desc=desc)
921 921 if acquirefn:
922 922 acquirefn()
923 923 return l
924 924
925 925 def _afterlock(self, callback):
926 926 """add a callback to the current repository lock.
927 927
928 928 The callback will be executed on lock release."""
929 929 l = self._lockref and self._lockref()
930 930 if l:
931 931 l.postrelease.append(callback)
932 932
933 933 def lock(self, wait=True):
934 934 '''Lock the repository store (.hg/store) and return a weak reference
935 935 to the lock. Use this before modifying the store (e.g. committing or
936 936 stripping). If you are opening a transaction, get a lock as well.'''
937 937 l = self._lockref and self._lockref()
938 938 if l is not None and l.held:
939 939 l.lock()
940 940 return l
941 941
942 942 def unlock():
943 943 self.store.write()
944 944 if self._dirtyphases:
945 945 phases.writeroots(self)
946 946 for k, ce in self._filecache.items():
947 947 if k == 'dirstate':
948 948 continue
949 949 ce.refresh()
950 950
951 951 l = self._lock(self.sjoin("lock"), wait, unlock,
952 952 self.invalidate, _('repository %s') % self.origroot)
953 953 self._lockref = weakref.ref(l)
954 954 return l
955 955
956 956 def wlock(self, wait=True):
957 957 '''Lock the non-store parts of the repository (everything under
958 958 .hg except .hg/store) and return a weak reference to the lock.
959 959 Use this before modifying files in .hg.'''
960 960 l = self._wlockref and self._wlockref()
961 961 if l is not None and l.held:
962 962 l.lock()
963 963 return l
964 964
965 965 def unlock():
966 966 self.dirstate.write()
967 967 ce = self._filecache.get('dirstate')
968 968 if ce:
969 969 ce.refresh()
970 970
971 971 l = self._lock(self.join("wlock"), wait, unlock,
972 972 self.invalidatedirstate, _('working directory of %s') %
973 973 self.origroot)
974 974 self._wlockref = weakref.ref(l)
975 975 return l
976 976
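# Illustration (not part of this file): when both locks are needed, take
# wlock first, as rollback() does above:
#
#   wlock = self.wlock()
#   try:
#       lock = self.lock()
#       ...
#   finally:
#       release(lock, wlock)   # 'release' comes from the lock module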
977 977 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
978 978 """
979 979 commit an individual file as part of a larger transaction
980 980 """
981 981
982 982 fname = fctx.path()
983 983 text = fctx.data()
984 984 flog = self.file(fname)
985 985 fparent1 = manifest1.get(fname, nullid)
986 986 fparent2 = fparent2o = manifest2.get(fname, nullid)
987 987
988 988 meta = {}
989 989 copy = fctx.renamed()
990 990 if copy and copy[0] != fname:
991 991 # Mark the new revision of this file as a copy of another
992 992 # file. This copy data will effectively act as a parent
993 993 # of this new revision. If this is a merge, the first
994 994 # parent will be the nullid (meaning "look up the copy data")
995 995 # and the second one will be the other parent. For example:
996 996 #
997 997 # 0 --- 1 --- 3 rev1 changes file foo
998 998 # \ / rev2 renames foo to bar and changes it
999 999 # \- 2 -/ rev3 should have bar with all changes and
1000 1000 # should record that bar descends from
1001 1001 # bar in rev2 and foo in rev1
1002 1002 #
1003 1003 # this allows this merge to succeed:
1004 1004 #
1005 1005 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1006 1006 # \ / merging rev3 and rev4 should use bar@rev2
1007 1007 # \- 2 --- 4 as the merge base
1008 1008 #
1009 1009
1010 1010 cfname = copy[0]
1011 1011 crev = manifest1.get(cfname)
1012 1012 newfparent = fparent2
1013 1013
1014 1014 if manifest2: # branch merge
1015 1015 if fparent2 == nullid or crev is None: # copied on remote side
1016 1016 if cfname in manifest2:
1017 1017 crev = manifest2[cfname]
1018 1018 newfparent = fparent1
1019 1019
1020 1020 # find source in nearest ancestor if we've lost track
1021 1021 if not crev:
1022 1022 self.ui.debug(" %s: searching for copy revision for %s\n" %
1023 1023 (fname, cfname))
1024 1024 for ancestor in self[None].ancestors():
1025 1025 if cfname in ancestor:
1026 1026 crev = ancestor[cfname].filenode()
1027 1027 break
1028 1028
1029 1029 if crev:
1030 1030 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1031 1031 meta["copy"] = cfname
1032 1032 meta["copyrev"] = hex(crev)
1033 1033 fparent1, fparent2 = nullid, newfparent
1034 1034 else:
1035 1035 self.ui.warn(_("warning: can't find ancestor for '%s' "
1036 1036 "copied from '%s'!\n") % (fname, cfname))
1037 1037
1038 1038 elif fparent2 != nullid:
1039 1039 # is one parent an ancestor of the other?
1040 1040 fparentancestor = flog.ancestor(fparent1, fparent2)
1041 1041 if fparentancestor == fparent1:
1042 1042 fparent1, fparent2 = fparent2, nullid
1043 1043 elif fparentancestor == fparent2:
1044 1044 fparent2 = nullid
1045 1045
1046 1046 # is the file changed?
1047 1047 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1048 1048 changelist.append(fname)
1049 1049 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1050 1050
1051 1051 # are just the flags changed during merge?
1052 1052 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1053 1053 changelist.append(fname)
1054 1054
1055 1055 return fparent1
1056 1056
1057 1057 def commit(self, text="", user=None, date=None, match=None, force=False,
1058 1058 editor=False, extra={}):
1059 1059 """Add a new revision to current repository.
1060 1060
1061 1061 Revision information is gathered from the working directory,
1062 1062 match can be used to filter the committed files. If editor is
1063 1063 supplied, it is called to get a commit message.
1064 1064 """
1065 1065
1066 1066 def fail(f, msg):
1067 1067 raise util.Abort('%s: %s' % (f, msg))
1068 1068
1069 1069 if not match:
1070 1070 match = matchmod.always(self.root, '')
1071 1071
1072 1072 if not force:
1073 1073 vdirs = []
1074 1074 match.dir = vdirs.append
1075 1075 match.bad = fail
1076 1076
1077 1077 wlock = self.wlock()
1078 1078 try:
1079 1079 wctx = self[None]
1080 1080 merge = len(wctx.parents()) > 1
1081 1081
1082 1082 if (not force and merge and match and
1083 1083 (match.files() or match.anypats())):
1084 1084 raise util.Abort(_('cannot partially commit a merge '
1085 1085 '(do not specify files or patterns)'))
1086 1086
1087 1087 changes = self.status(match=match, clean=force)
1088 1088 if force:
1089 1089 changes[0].extend(changes[6]) # mq may commit unchanged files
1090 1090
1091 1091 # check subrepos
1092 1092 subs = []
1093 1093 removedsubs = set()
1094 1094 if '.hgsub' in wctx:
1095 1095 # only manage subrepos and .hgsubstate if .hgsub is present
1096 1096 for p in wctx.parents():
1097 1097 removedsubs.update(s for s in p.substate if match(s))
1098 1098 for s in wctx.substate:
1099 1099 removedsubs.discard(s)
1100 1100 if match(s) and wctx.sub(s).dirty():
1101 1101 subs.append(s)
1102 1102 if (subs or removedsubs):
1103 1103 if (not match('.hgsub') and
1104 1104 '.hgsub' in (wctx.modified() + wctx.added())):
1105 1105 raise util.Abort(
1106 1106 _("can't commit subrepos without .hgsub"))
1107 1107 if '.hgsubstate' not in changes[0]:
1108 1108 changes[0].insert(0, '.hgsubstate')
1109 1109 if '.hgsubstate' in changes[2]:
1110 1110 changes[2].remove('.hgsubstate')
1111 1111 elif '.hgsub' in changes[2]:
1112 1112 # clean up .hgsubstate when .hgsub is removed
1113 1113 if ('.hgsubstate' in wctx and
1114 1114 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1115 1115 changes[2].insert(0, '.hgsubstate')
1116 1116
1117 1117 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1118 1118 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1119 1119 if changedsubs:
1120 1120 raise util.Abort(_("uncommitted changes in subrepo %s")
1121 1121 % changedsubs[0],
1122 1122 hint=_("use --subrepos for recursive commit"))
1123 1123
1124 1124 # make sure all explicit patterns are matched
1125 1125 if not force and match.files():
1126 1126 matched = set(changes[0] + changes[1] + changes[2])
1127 1127
1128 1128 for f in match.files():
1129 1129 if f == '.' or f in matched or f in wctx.substate:
1130 1130 continue
1131 1131 if f in changes[3]: # missing
1132 1132 fail(f, _('file not found!'))
1133 1133 if f in vdirs: # visited directory
1134 1134 d = f + '/'
1135 1135 for mf in matched:
1136 1136 if mf.startswith(d):
1137 1137 break
1138 1138 else:
1139 1139 fail(f, _("no match under directory!"))
1140 1140 elif f not in self.dirstate:
1141 1141 fail(f, _("file not tracked!"))
1142 1142
1143 1143 if (not force and not extra.get("close") and not merge
1144 1144 and not (changes[0] or changes[1] or changes[2])
1145 1145 and wctx.branch() == wctx.p1().branch()):
1146 1146 return None
1147 1147
1148 1148 ms = mergemod.mergestate(self)
1149 1149 for f in changes[0]:
1150 1150 if f in ms and ms[f] == 'u':
1151 1151 raise util.Abort(_("unresolved merge conflicts "
1152 1152 "(see hg help resolve)"))
1153 1153
1154 1154 cctx = context.workingctx(self, text, user, date, extra, changes)
1155 1155 if editor:
1156 1156 cctx._text = editor(self, cctx, subs)
1157 1157 edited = (text != cctx._text)
1158 1158
1159 1159 # commit subs
1160 1160 if subs or removedsubs:
1161 1161 state = wctx.substate.copy()
1162 1162 for s in sorted(subs):
1163 1163 sub = wctx.sub(s)
1164 1164 self.ui.status(_('committing subrepository %s\n') %
1165 1165 subrepo.subrelpath(sub))
1166 1166 sr = sub.commit(cctx._text, user, date)
1167 1167 state[s] = (state[s][0], sr)
1168 1168 subrepo.writestate(self, state)
1169 1169
1170 1170 # Save commit message in case this transaction gets rolled back
1171 1171 # (e.g. by a pretxncommit hook). Leave the content alone on
1172 1172 # the assumption that the user will use the same editor again.
1173 1173 msgfn = self.savecommitmessage(cctx._text)
1174 1174
1175 1175 p1, p2 = self.dirstate.parents()
1176 1176 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1177 1177 try:
1178 1178 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1179 1179 ret = self.commitctx(cctx, True)
1180 1180 except:
1181 1181 if edited:
1182 1182 self.ui.write(
1183 1183 _('note: commit message saved in %s\n') % msgfn)
1184 1184 raise
1185 1185
1186 1186 # update bookmarks, dirstate and mergestate
1187 1187 bookmarks.update(self, p1, ret)
1188 1188 for f in changes[0] + changes[1]:
1189 1189 self.dirstate.normal(f)
1190 1190 for f in changes[2]:
1191 1191 self.dirstate.drop(f)
1192 1192 self.dirstate.setparents(ret)
1193 1193 ms.reset()
1194 1194 finally:
1195 1195 wlock.release()
1196 1196
1197 1197 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1198 1198 return ret
1199 1199
1200 1200 def commitctx(self, ctx, error=False):
1201 1201 """Add a new revision to current repository.
1202 1202 Revision information is passed via the context argument.
1203 1203 """
1204 1204
1205 1205 tr = lock = None
1206 1206 removed = list(ctx.removed())
1207 1207 p1, p2 = ctx.p1(), ctx.p2()
1208 1208 user = ctx.user()
1209 1209
1210 1210 lock = self.lock()
1211 1211 try:
1212 1212 tr = self.transaction("commit")
1213 1213 trp = weakref.proxy(tr)
1214 1214
1215 1215 if ctx.files():
1216 1216 m1 = p1.manifest().copy()
1217 1217 m2 = p2.manifest()
1218 1218
1219 1219 # check in files
1220 1220 new = {}
1221 1221 changed = []
1222 1222 linkrev = len(self)
1223 1223 for f in sorted(ctx.modified() + ctx.added()):
1224 1224 self.ui.note(f + "\n")
1225 1225 try:
1226 1226 fctx = ctx[f]
1227 1227 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1228 1228 changed)
1229 1229 m1.set(f, fctx.flags())
1230 1230 except OSError, inst:
1231 1231 self.ui.warn(_("trouble committing %s!\n") % f)
1232 1232 raise
1233 1233 except IOError, inst:
1234 1234 errcode = getattr(inst, 'errno', errno.ENOENT)
1235 1235 if error or errcode and errcode != errno.ENOENT:
1236 1236 self.ui.warn(_("trouble committing %s!\n") % f)
1237 1237 raise
1238 1238 else:
1239 1239 removed.append(f)
1240 1240
1241 1241 # update manifest
1242 1242 m1.update(new)
1243 1243 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1244 1244 drop = [f for f in removed if f in m1]
1245 1245 for f in drop:
1246 1246 del m1[f]
1247 1247 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1248 1248 p2.manifestnode(), (new, drop))
1249 1249 files = changed + removed
1250 1250 else:
1251 1251 mn = p1.manifestnode()
1252 1252 files = []
1253 1253
1254 1254 # update changelog
1255 1255 self.changelog.delayupdate()
1256 1256 n = self.changelog.add(mn, files, ctx.description(),
1257 1257 trp, p1.node(), p2.node(),
1258 1258 user, ctx.date(), ctx.extra().copy())
1259 1259 p = lambda: self.changelog.writepending() and self.root or ""
1260 1260 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1261 1261 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1262 1262 parent2=xp2, pending=p)
1263 1263 self.changelog.finalize(trp)
1264 1264 # set the new commit in its proper phase
1265 1265 targetphase = self.ui.configint('phases', 'new-commit',
1266 1266 phases.draft)
1267 1267 if targetphase:
1268 1268 # retracting the boundary does not alter parent changesets.
1269 1269 # if a parent has a higher phase, the resulting phase will
1270 1270 # be compliant anyway
1271 1271 #
1272 1272 # if the minimal phase is 0 (public) we don't need to retract anything
1273 1273 phases.retractboundary(self, targetphase, [n])
1274 1274 tr.close()
1275 1275 self.updatebranchcache()
1276 1276 return n
1277 1277 finally:
1278 1278 if tr:
1279 1279 tr.release()
1280 1280 lock.release()
1281 1281
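# Illustration (assumed hgrc, not part of this file): targetphase above is
# read with ui.configint, so this version expects a numeric phase
# (0 = public, 1 = draft, 2 = secret); e.g. to make new commits secret:
#
#   [phases]
#   new-commit = 2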
1282 1282 def destroyed(self):
1283 1283 '''Inform the repository that nodes have been destroyed.
1284 1284 Intended for use by strip and rollback, so there's a common
1285 1285 place for anything that has to be done after destroying history.'''
1286 1286 # XXX it might be nice if we could take the list of destroyed
1287 1287 # nodes, but I don't see an easy way for rollback() to do that
1288 1288
1289 1289 # Ensure the persistent tag cache is updated. Doing it now
1290 1290 # means that the tag cache only has to worry about destroyed
1291 1291 # heads immediately after a strip/rollback. That in turn
1292 1292 # guarantees that "cachetip == currenttip" (comparing both rev
1293 1293 # and node) always means no nodes have been added or destroyed.
1294 1294
1295 1295 # XXX this is suboptimal when qrefresh'ing: we strip the current
1296 1296 # head, refresh the tag cache, then immediately add a new head.
1297 1297 # But I think doing it this way is necessary for the "instant
1298 1298 # tag cache retrieval" case to work.
1299 1299 self.invalidatecaches()
1300 1300
1301 1301 def walk(self, match, node=None):
1302 1302 '''
1303 1303 walk recursively through the directory tree or a given
1304 1304 changeset, finding all files matched by the match
1305 1305 function
1306 1306 '''
1307 1307 return self[node].walk(match)
1308 1308
1309 1309 def status(self, node1='.', node2=None, match=None,
1310 1310 ignored=False, clean=False, unknown=False,
1311 1311 listsubrepos=False):
1312 1312 """return status of files between two nodes or node and working directory
1313 1313
1314 1314 If node1 is None, use the first dirstate parent instead.
1315 1315 If node2 is None, compare node1 with working directory.
1316 1316 """
1317 1317
1318 1318 def mfmatches(ctx):
1319 1319 mf = ctx.manifest().copy()
1320 1320 for fn in mf.keys():
1321 1321 if not match(fn):
1322 1322 del mf[fn]
1323 1323 return mf
1324 1324
1325 1325 if isinstance(node1, context.changectx):
1326 1326 ctx1 = node1
1327 1327 else:
1328 1328 ctx1 = self[node1]
1329 1329 if isinstance(node2, context.changectx):
1330 1330 ctx2 = node2
1331 1331 else:
1332 1332 ctx2 = self[node2]
1333 1333
1334 1334 working = ctx2.rev() is None
1335 1335 parentworking = working and ctx1 == self['.']
1336 1336 match = match or matchmod.always(self.root, self.getcwd())
1337 1337 listignored, listclean, listunknown = ignored, clean, unknown
1338 1338
1339 1339 # load earliest manifest first for caching reasons
1340 1340 if not working and ctx2.rev() < ctx1.rev():
1341 1341 ctx2.manifest()
1342 1342
1343 1343 if not parentworking:
1344 1344 def bad(f, msg):
1345 1345 if f not in ctx1:
1346 1346 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1347 1347 match.bad = bad
1348 1348
1349 1349 if working: # we need to scan the working dir
1350 1350 subrepos = []
1351 1351 if '.hgsub' in self.dirstate:
1352 1352 subrepos = ctx2.substate.keys()
1353 1353 s = self.dirstate.status(match, subrepos, listignored,
1354 1354 listclean, listunknown)
1355 1355 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1356 1356
1357 1357 # check for any possibly clean files
1358 1358 if parentworking and cmp:
1359 1359 fixup = []
1360 1360 # do a full compare of any files that might have changed
1361 1361 for f in sorted(cmp):
1362 1362 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1363 1363 or ctx1[f].cmp(ctx2[f])):
1364 1364 modified.append(f)
1365 1365 else:
1366 1366 fixup.append(f)
1367 1367
1368 1368 # update dirstate for files that are actually clean
1369 1369 if fixup:
1370 1370 if listclean:
1371 1371 clean += fixup
1372 1372
1373 1373 try:
1374 1374 # updating the dirstate is optional
1375 1375 # so we don't wait on the lock
1376 1376 wlock = self.wlock(False)
1377 1377 try:
1378 1378 for f in fixup:
1379 1379 self.dirstate.normal(f)
1380 1380 finally:
1381 1381 wlock.release()
1382 1382 except error.LockError:
1383 1383 pass
1384 1384
1385 1385 if not parentworking:
1386 1386 mf1 = mfmatches(ctx1)
1387 1387 if working:
1388 1388 # we are comparing working dir against non-parent
1389 1389 # generate a pseudo-manifest for the working dir
1390 1390 mf2 = mfmatches(self['.'])
1391 1391 for f in cmp + modified + added:
1392 1392 mf2[f] = None
1393 1393 mf2.set(f, ctx2.flags(f))
1394 1394 for f in removed:
1395 1395 if f in mf2:
1396 1396 del mf2[f]
1397 1397 else:
1398 1398 # we are comparing two revisions
1399 1399 deleted, unknown, ignored = [], [], []
1400 1400 mf2 = mfmatches(ctx2)
1401 1401
1402 1402 modified, added, clean = [], [], []
1403 1403 for fn in mf2:
1404 1404 if fn in mf1:
1405 1405 if (fn not in deleted and
1406 1406 (mf1.flags(fn) != mf2.flags(fn) or
1407 1407 (mf1[fn] != mf2[fn] and
1408 1408 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1409 1409 modified.append(fn)
1410 1410 elif listclean:
1411 1411 clean.append(fn)
1412 1412 del mf1[fn]
1413 1413 elif fn not in deleted:
1414 1414 added.append(fn)
1415 1415 removed = mf1.keys()
1416 1416
1417 1417 if working and modified and not self.dirstate._checklink:
1418 1418 # Symlink placeholders may get non-symlink-like contents
1419 1419 # via user error or dereferencing by NFS or Samba servers,
1420 1420 # so we filter out any placeholders that don't look like a
1421 1421 # symlink
1422 1422 sane = []
1423 1423 for f in modified:
1424 1424 if ctx2.flags(f) == 'l':
1425 1425 d = ctx2[f].data()
1426 1426 if len(d) >= 1024 or '\n' in d or util.binary(d):
1427 1427 self.ui.debug('ignoring suspect symlink placeholder'
1428 1428 ' "%s"\n' % f)
1429 1429 continue
1430 1430 sane.append(f)
1431 1431 modified = sane
1432 1432
1433 1433 r = modified, added, removed, deleted, unknown, ignored, clean
1434 1434
1435 1435 if listsubrepos:
1436 1436 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1437 1437 if working:
1438 1438 rev2 = None
1439 1439 else:
1440 1440 rev2 = ctx2.substate[subpath][1]
1441 1441 try:
1442 1442 submatch = matchmod.narrowmatcher(subpath, match)
1443 1443 s = sub.status(rev2, match=submatch, ignored=listignored,
1444 1444 clean=listclean, unknown=listunknown,
1445 1445 listsubrepos=True)
1446 1446 for rfiles, sfiles in zip(r, s):
1447 1447 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1448 1448 except error.LookupError:
1449 1449 self.ui.status(_("skipping missing subrepository: %s\n")
1450 1450 % subpath)
1451 1451
1452 1452 for l in r:
1453 1453 l.sort()
1454 1454 return r
1455 1455
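# Illustration (not part of this file): callers unpack the seven lists
# positionally, in the order assembled above:
#
#   modified, added, removed, deleted, unknown, ignored, clean = \
#       repo.status(ignored=True, clean=True, unknown=True)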
1456 1456 def heads(self, start=None):
1457 1457 heads = self.changelog.heads(start)
1458 1458 # sort the output in rev descending order
1459 1459 return sorted(heads, key=self.changelog.rev, reverse=True)
1460 1460
1461 1461 def branchheads(self, branch=None, start=None, closed=False):
1462 1462 '''return a (possibly filtered) list of heads for the given branch
1463 1463
1464 1464 Heads are returned in topological order, from newest to oldest.
1465 1465 If branch is None, use the dirstate branch.
1466 1466 If start is not None, return only heads reachable from start.
1467 1467 If closed is True, return heads that are marked as closed as well.
1468 1468 '''
1469 1469 if branch is None:
1470 1470 branch = self[None].branch()
1471 1471 branches = self.branchmap()
1472 1472 if branch not in branches:
1473 1473 return []
1474 1474 # the cache returns heads ordered lowest to highest
1475 1475 bheads = list(reversed(branches[branch]))
1476 1476 if start is not None:
1477 1477 # filter out the heads that cannot be reached from startrev
1478 1478 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1479 1479 bheads = [h for h in bheads if h in fbheads]
1480 1480 if not closed:
1481 1481 bheads = [h for h in bheads if
1482 1482 ('close' not in self.changelog.read(h)[5])]
1483 1483 return bheads
1484 1484
1485 1485 def branches(self, nodes):
1486 1486 if not nodes:
1487 1487 nodes = [self.changelog.tip()]
1488 1488 b = []
1489 1489 for n in nodes:
1490 1490 t = n
1491 1491 while True:
1492 1492 p = self.changelog.parents(n)
1493 1493 if p[1] != nullid or p[0] == nullid:
1494 1494 b.append((t, n, p[0], p[1]))
1495 1495 break
1496 1496 n = p[0]
1497 1497 return b
1498 1498
1499 1499 def between(self, pairs):
1500 1500 r = []
1501 1501
1502 1502 for top, bottom in pairs:
1503 1503 n, l, i = top, [], 0
1504 1504 f = 1
1505 1505
1506 1506 while n != bottom and n != nullid:
1507 1507 p = self.changelog.parents(n)[0]
1508 1508 if i == f:
1509 1509 l.append(n)
1510 1510 f = f * 2
1511 1511 n = p
1512 1512 i += 1
1513 1513
1514 1514 r.append(l)
1515 1515
1516 1516 return r
1517 1517
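# Illustration (not part of this file): between() samples the first-parent
# chain from each top at exponentially growing distances (1, 2, 4, 8, ...),
# a thinning used by older discovery code. On a linear history 0..10, the
# pair (node(10), node(0)) yields the nodes of revs [9, 8, 6, 2].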
1518 1518 def pull(self, remote, heads=None, force=False):
1519 1519 lock = self.lock()
1520 1520 try:
1521 1521 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1522 1522 force=force)
1523 1523 common, fetch, rheads = tmp
1524 1524 if not fetch:
1525 1525 self.ui.status(_("no changes found\n"))
1526 1526 added = []
1527 1527 result = 0
1528 1528 else:
1529 1529 if heads is None and list(common) == [nullid]:
1530 1530 self.ui.status(_("requesting all changes\n"))
1531 1531 elif heads is None and remote.capable('changegroupsubset'):
1532 1532 # issue1320, avoid a race if remote changed after discovery
1533 1533 heads = rheads
1534 1534
1535 1535 if remote.capable('getbundle'):
1536 1536 cg = remote.getbundle('pull', common=common,
1537 1537 heads=heads or rheads)
1538 1538 elif heads is None:
1539 1539 cg = remote.changegroup(fetch, 'pull')
1540 1540 elif not remote.capable('changegroupsubset'):
1541 1541 raise util.Abort(_("partial pull cannot be done because "
1542 1542 "other repository doesn't support "
1543 1543 "changegroupsubset."))
1544 1544 else:
1545 1545 cg = remote.changegroupsubset(fetch, heads, 'pull')
1546 1546 clstart = len(self.changelog)
1547 1547 result = self.addchangegroup(cg, 'pull', remote.url())
1548 1548 clend = len(self.changelog)
1549 1549 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1550 1550
1551 1551
1552 1552 # Get remote phase data
1553 1553 remotephases = remote.listkeys('phases')
1554 1554 publishing = bool(remotephases.get('publishing', False))
1555 1555 if remotephases and not publishing:
1556 1556 # remote is new and non-publishing
1557 1557 subset = common + added
1558 rheads, rroots = phases.analyzeremotephases(self, subset,
1559 remotephases)
1560 for phase, boundary in enumerate(rheads):
1561 phases.advanceboundary(self, phase, boundary)
1558 pheads, _dr = phases.analyzeremotephases(self, subset,
1559 remotephases)
1560 phases.advanceboundary(self, phases.public, pheads)
1561 phases.advanceboundary(self, phases.draft, common + added)
1562 1562 else:
1563 1563 # Remote is old or publishing; all common changesets
1564 1564 # should be seen as public
1565 1565 phases.advanceboundary(self, phases.public, common + added)
1566 1566 finally:
1567 1567 lock.release()
1568 1568
1569 1569 return result
1570 1570
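# Illustration (assumed wire shape, not part of this file): pushkey values
# are strings, so listkeys('phases') maps draft-root hashes to their phase
# number, plus an optional publishing flag:
#
#   {'<hex of a draft root>': '1', 'publishing': 'True'}
#
# analyzeremotephases() condenses this into (public heads, draft roots).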
1571 1571 def checkpush(self, force, revs):
1572 1572 """Extensions can override this function if additional checks have
1573 1573 to be performed before pushing, or call it if they override push
1574 1574 command.
1575 1575 """
1576 1576 pass
1577 1577
1578 1578 def push(self, remote, force=False, revs=None, newbranch=False):
1579 1579 '''Push outgoing changesets (limited by revs) from the current
1580 1580 repository to remote. Return an integer:
1581 1581 - 0 means HTTP error *or* nothing to push
1582 1582 - 1 means we pushed and remote head count is unchanged *or*
1583 1583 we have outgoing changesets but refused to push
1584 1584 - other values as described by addchangegroup()
1585 1585 '''
1586 1586 # there are two ways to push to remote repo:
1587 1587 #
1588 1588 # addchangegroup assumes local user can lock remote
1589 1589 # repo (local filesystem, old ssh servers).
1590 1590 #
1591 1591 # unbundle assumes local user cannot lock remote repo (new ssh
1592 1592 # servers, http servers).
1593 1593
1594 1594 self.checkpush(force, revs)
1595 1595 lock = None
1596 1596 unbundle = remote.capable('unbundle')
1597 1597 if not unbundle:
1598 1598 lock = remote.lock()
1599 1599 try:
1600 1600 # get local lock as we might write phase data
1601 1601 locallock = self.lock()
1602 1602 try:
1603 1603 cg, remote_heads, fut = discovery.prepush(self, remote, force,
1604 1604 revs, newbranch)
1605 1605 ret = remote_heads
1606 1606 # create a callback for addchangegroup.
1607 1607 # It will be used in the branch of the conditional too.
1608 1608 if cg is not None:
1609 1609 if unbundle:
1610 1610 # local repo finds heads on server, finds out what
1611 1611 # revs it must push. once revs transferred, if server
1612 1612 # finds it has different heads (someone else won
1613 1613 # commit/push race), server aborts.
1614 1614 if force:
1615 1615 remote_heads = ['force']
1616 1616 # ssh: return remote's addchangegroup()
1617 1617 # http: return remote's addchangegroup() or 0 for error
1618 1618 ret = remote.unbundle(cg, remote_heads, 'push')
1619 1619 else:
1620 1620 # we return an integer indicating remote head count change
1621 1621 ret = remote.addchangegroup(cg, 'push', self.url())
1622 1622
1623 1623 # even when we don't push, exchanging phase data is useful
1624 1624 remotephases = remote.listkeys('phases')
1625 1625 if not remotephases: # old server or public only repo
1626 1626 phases.advanceboundary(self, phases.public, fut)
1627 1627 # don't push any phase data as there is nothing to push
1628 1628 else:
1629 1629 ana = phases.analyzeremotephases(self, fut, remotephases)
1630 rheads, rroots = ana
1630 pheads, droots = ana
1631 1631 ### Apply remote phase on local
1632 1632 if remotephases.get('publishing', False):
1633 1633 phases.advanceboundary(self, phases.public, fut)
1634 1634 else: # publish = False
1635 for phase, rpheads in enumerate(rheads):
1636 phases.advanceboundary(self, phase, rpheads)
1635 phases.advanceboundary(self, phases.public, pheads)
1636 phases.advanceboundary(self, phases.draft, fut)
1637 1637 ### Apply local phase on remote
1638 1638 #
1639 1639 # XXX If push failed we should use strict common and not
1640 # future to avoir pushing phase data on unknown changeset.
1640 # future to avoid pushing phase data on unknown changeset.
1641 1641 # This is to be done later.
1642 1642
1643 # element we want to push
1644 topush = []
1645
1646 # store details of known remote phases of several revisions
1647 # /!\ set of index I holds rev where: I <= rev.phase()
1648 # /!\ public phase (index 0) is ignored
1649 remdetails = [set() for i in xrange(len(phases.allphases))]
1650 _revs = set()
1651 for relremphase in phases.trackedphases[::-1]:
1652 # we iterate backward because the list always grows
1653 # when filled in this direction.
1654 _revs.update(self.revs('%ln::%ln',
1655 rroots[relremphase], fut))
1656 remdetails[relremphase].update(_revs)
1657
1658 for phase in phases.allphases[:-1]:
1659 # We don't need the last phase as we will never want to
1660 # move anything to it while moving phase backward.
1661
1662 # Get the list of all revs on remote which are in a
1663 # phase higher than currently processed phase.
1664 relremrev = remdetails[phase + 1]
1665
1666 if not relremrev:
1667 # no candidate to remote push anymore
1668 # break before any expensive revset
1669 break
1670
1671 # dynamically inject the appropriate phase symbol
1672 phasename = phases.phasenames[phase]
1673 odrevset = 'heads(%%ld and %s())' % phasename
1674 outdated = self.set(odrevset, relremrev)
1675 for od in outdated:
1676 candstart = len(remdetails) - 1
1677 candstop = phase + 1
1678 candidateold = xrange(candstart, candstop, -1)
1679 for oldphase in candidateold:
1680 if od.rev() in remdetails[oldphase]:
1681 break
1682 else: # last one: no need to search
1683 oldphase = phase + 1
1684 topush.append((oldphase, phase, od))
1685
1686 # push every needed data
1687 for oldphase, newphase, newremotehead in topush:
1643 # Get the list of all revs that are draft on remote but public here.
1644 # XXX Beware that the revset breaks if droots is not strictly
1645 # XXX roots; we may want to ensure it is, but that is costly
1646 outdated = self.set('heads((%ln::%ln) and public())',
1647 droots, fut)
1648 for newremotehead in outdated:
1688 1649 r = remote.pushkey('phases',
1689 1650 newremotehead.hex(),
1690 str(oldphase), str(newphase))
1651 str(phases.draft),
1652 str(phases.public))
1691 1653 if not r:
1692 self.ui.warn(_('updating phase of %s '
1693 'to %s from %s failed!\n')
1694 % (newremotehead, newphase,
1695 oldphase))
1654 self.ui.warn(_('updating %s to public failed!\n')
1655 % newremotehead)
1696 1656 finally:
1697 1657 locallock.release()
1698 1658 finally:
1699 1659 if lock is not None:
1700 1660 lock.release()
1701 1661
1702 1662 self.ui.debug("checking for updated bookmarks\n")
1703 1663 rb = remote.listkeys('bookmarks')
1704 1664 for k in rb.keys():
1705 1665 if k in self._bookmarks:
1706 1666 nr, nl = rb[k], hex(self._bookmarks[k])
1707 1667 if nr in self:
1708 1668 cr = self[nr]
1709 1669 cl = self[nl]
1710 1670 if cl in cr.descendants():
1711 1671 r = remote.pushkey('bookmarks', k, nr, nl)
1712 1672 if r:
1713 1673 self.ui.status(_("updating bookmark %s\n") % k)
1714 1674 else:
1715 1675 self.ui.warn(_('updating bookmark %s'
1716 1676 ' failed!\n') % k)
1717 1677
1718 1678 return ret
1719 1679
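On the wire, the phase move at the end of push() is just one pushkey call per outdated remote head. Here is a sketch of the call shape, assuming remote is any peer object exposing pushkey() and node_hex is a 40-character hex nodeid (both names hypothetical):

    def publish_on_remote(ui, remote, node_hex):
        # pushkey('phases', node, old, new) returns a true value on
        # success; '1' -> '0' is str(phases.draft) -> str(phases.public)
        ok = remote.pushkey('phases', node_hex, '1', '0')
        if not ok:
            ui.warn('updating %s to public failed!\n' % node_hex)
        return ok
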
1720 1680 def changegroupinfo(self, nodes, source):
1721 1681 if self.ui.verbose or source == 'bundle':
1722 1682 self.ui.status(_("%d changesets found\n") % len(nodes))
1723 1683 if self.ui.debugflag:
1724 1684 self.ui.debug("list of changesets:\n")
1725 1685 for node in nodes:
1726 1686 self.ui.debug("%s\n" % hex(node))
1727 1687
1728 1688 def changegroupsubset(self, bases, heads, source):
1729 1689 """Compute a changegroup consisting of all the nodes that are
1730 1690 descendants of any of the bases and ancestors of any of the heads.
1731 1691 Return a chunkbuffer object whose read() method will return
1732 1692 successive changegroup chunks.
1733 1693
1734 1694 It is fairly complex as determining which filenodes and which
1735 1695 manifest nodes need to be included for the changeset to be complete
1736 1696 is non-trivial.
1737 1697
1738 1698 Another wrinkle is doing the reverse, figuring out which changeset in
1739 1699 the changegroup a particular filenode or manifestnode belongs to.
1740 1700 """
1741 1701 cl = self.changelog
1742 1702 if not bases:
1743 1703 bases = [nullid]
1744 1704 csets, bases, heads = cl.nodesbetween(bases, heads)
1745 1705 # We assume that all ancestors of bases are known
1746 1706 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1747 1707 return self._changegroupsubset(common, csets, heads, source)
1748 1708
1749 1709 def getlocalbundle(self, source, outgoing):
1750 1710 """Like getbundle, but taking a discovery.outgoing as an argument.
1751 1711
1752 1712 This is only implemented for local repos and reuses potentially
1753 1713 precomputed sets in outgoing."""
1754 1714 if not outgoing.missing:
1755 1715 return None
1756 1716 return self._changegroupsubset(outgoing.common,
1757 1717 outgoing.missing,
1758 1718 outgoing.missingheads,
1759 1719 source)
1760 1720
1761 1721 def getbundle(self, source, heads=None, common=None):
1762 1722 """Like changegroupsubset, but returns the set difference between the
1763 1723 ancestors of heads and the ancestors common.
1764 1724
1765 1725 If heads is None, use the local heads. If common is None, use [nullid].
1766 1726
1767 1727 The nodes in common might not all be known locally due to the way the
1768 1728 current discovery protocol works.
1769 1729 """
1770 1730 cl = self.changelog
1771 1731 if common:
1772 1732 nm = cl.nodemap
1773 1733 common = [n for n in common if n in nm]
1774 1734 else:
1775 1735 common = [nullid]
1776 1736 if not heads:
1777 1737 heads = cl.heads()
1778 1738 return self.getlocalbundle(source,
1779 1739 discovery.outgoing(cl, common, heads))
1780 1740
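getbundle() above is getlocalbundle() plus the default computation of the outgoing set; assuming repo, discovery and nullid are in scope, the two call shapes below are roughly equivalent (a sketch of the code above, not an extra API):

    cg = repo.getbundle('pull', heads=heads, common=common)

    # ... expands to approximately:
    cl = repo.changelog
    common = [n for n in (common or []) if n in cl.nodemap] or [nullid]
    cg = repo.getlocalbundle('pull',
                             discovery.outgoing(cl, common,
                                                heads or cl.heads()))
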
1781 1741 def _changegroupsubset(self, commonrevs, csets, heads, source):
1782 1742
1783 1743 cl = self.changelog
1784 1744 mf = self.manifest
1785 1745 mfs = {} # needed manifests
1786 1746 fnodes = {} # needed file nodes
1787 1747 changedfiles = set()
1788 1748 fstate = ['', {}]
1789 1749 count = [0]
1790 1750
1791 1751 # can we go through the fast path ?
1792 1752 heads.sort()
1793 1753 if heads == sorted(self.heads()):
1794 1754 return self._changegroup(csets, source)
1795 1755
1796 1756 # slow path
1797 1757 self.hook('preoutgoing', throw=True, source=source)
1798 1758 self.changegroupinfo(csets, source)
1799 1759
1800 1760 # filter any nodes that claim to be part of the known set
1801 1761 def prune(revlog, missing):
1802 1762 return [n for n in missing
1803 1763 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1804 1764
1805 1765 def lookup(revlog, x):
1806 1766 if revlog == cl:
1807 1767 c = cl.read(x)
1808 1768 changedfiles.update(c[3])
1809 1769 mfs.setdefault(c[0], x)
1810 1770 count[0] += 1
1811 1771 self.ui.progress(_('bundling'), count[0],
1812 1772 unit=_('changesets'), total=len(csets))
1813 1773 return x
1814 1774 elif revlog == mf:
1815 1775 clnode = mfs[x]
1816 1776 mdata = mf.readfast(x)
1817 1777 for f in changedfiles:
1818 1778 if f in mdata:
1819 1779 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1820 1780 count[0] += 1
1821 1781 self.ui.progress(_('bundling'), count[0],
1822 1782 unit=_('manifests'), total=len(mfs))
1823 1783 return mfs[x]
1824 1784 else:
1825 1785 self.ui.progress(
1826 1786 _('bundling'), count[0], item=fstate[0],
1827 1787 unit=_('files'), total=len(changedfiles))
1828 1788 return fstate[1][x]
1829 1789
1830 1790 bundler = changegroup.bundle10(lookup)
1831 1791 reorder = self.ui.config('bundle', 'reorder', 'auto')
1832 1792 if reorder == 'auto':
1833 1793 reorder = None
1834 1794 else:
1835 1795 reorder = util.parsebool(reorder)
1836 1796
1837 1797 def gengroup():
1838 1798 # Create a changenode group generator that will call our functions
1839 1799 # back to lookup the owning changenode and collect information.
1840 1800 for chunk in cl.group(csets, bundler, reorder=reorder):
1841 1801 yield chunk
1842 1802 self.ui.progress(_('bundling'), None)
1843 1803
1844 1804 # Create a generator for the manifestnodes that calls our lookup
1845 1805 # and data collection functions back.
1846 1806 count[0] = 0
1847 1807 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1848 1808 yield chunk
1849 1809 self.ui.progress(_('bundling'), None)
1850 1810
1851 1811 mfs.clear()
1852 1812
1853 1813 # Go through all our files in order sorted by name.
1854 1814 count[0] = 0
1855 1815 for fname in sorted(changedfiles):
1856 1816 filerevlog = self.file(fname)
1857 1817 if not len(filerevlog):
1858 1818 raise util.Abort(_("empty or missing revlog for %s") % fname)
1859 1819 fstate[0] = fname
1860 1820 fstate[1] = fnodes.pop(fname, {})
1861 1821
1862 1822 nodelist = prune(filerevlog, fstate[1])
1863 1823 if nodelist:
1864 1824 count[0] += 1
1865 1825 yield bundler.fileheader(fname)
1866 1826 for chunk in filerevlog.group(nodelist, bundler, reorder):
1867 1827 yield chunk
1868 1828
1869 1829 # Signal that no more groups are left.
1870 1830 yield bundler.close()
1871 1831 self.ui.progress(_('bundling'), None)
1872 1832
1873 1833 if csets:
1874 1834 self.hook('outgoing', node=hex(csets[0]), source=source)
1875 1835
1876 1836 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1877 1837
1878 1838 def changegroup(self, basenodes, source):
1879 1839 # to avoid a race we use changegroupsubset() (issue1320)
1880 1840 return self.changegroupsubset(basenodes, self.heads(), source)
1881 1841
1882 1842 def _changegroup(self, nodes, source):
1883 1843 """Compute the changegroup of all nodes that we have that a recipient
1884 1844 doesn't. Return a chunkbuffer object whose read() method will return
1885 1845 successive changegroup chunks.
1886 1846
1887 1847 This is much easier than the previous function as we can assume that
1888 1848 the recipient has any changenode we aren't sending them.
1889 1849
1890 1850 nodes is the set of nodes to send"""
1891 1851
1892 1852 cl = self.changelog
1893 1853 mf = self.manifest
1894 1854 mfs = {}
1895 1855 changedfiles = set()
1896 1856 fstate = ['']
1897 1857 count = [0]
1898 1858
1899 1859 self.hook('preoutgoing', throw=True, source=source)
1900 1860 self.changegroupinfo(nodes, source)
1901 1861
1902 1862 revset = set([cl.rev(n) for n in nodes])
1903 1863
1904 1864 def gennodelst(log):
1905 1865 return [log.node(r) for r in log if log.linkrev(r) in revset]
1906 1866
1907 1867 def lookup(revlog, x):
1908 1868 if revlog == cl:
1909 1869 c = cl.read(x)
1910 1870 changedfiles.update(c[3])
1911 1871 mfs.setdefault(c[0], x)
1912 1872 count[0] += 1
1913 1873 self.ui.progress(_('bundling'), count[0],
1914 1874 unit=_('changesets'), total=len(nodes))
1915 1875 return x
1916 1876 elif revlog == mf:
1917 1877 count[0] += 1
1918 1878 self.ui.progress(_('bundling'), count[0],
1919 1879 unit=_('manifests'), total=len(mfs))
1920 1880 return cl.node(revlog.linkrev(revlog.rev(x)))
1921 1881 else:
1922 1882 self.ui.progress(
1923 1883 _('bundling'), count[0], item=fstate[0],
1924 1884 total=len(changedfiles), unit=_('files'))
1925 1885 return cl.node(revlog.linkrev(revlog.rev(x)))
1926 1886
1927 1887 bundler = changegroup.bundle10(lookup)
1928 1888 reorder = self.ui.config('bundle', 'reorder', 'auto')
1929 1889 if reorder == 'auto':
1930 1890 reorder = None
1931 1891 else:
1932 1892 reorder = util.parsebool(reorder)
1933 1893
1934 1894 def gengroup():
1935 1895 '''yield a sequence of changegroup chunks (strings)'''
1936 1896 # construct a list of all changed files
1937 1897
1938 1898 for chunk in cl.group(nodes, bundler, reorder=reorder):
1939 1899 yield chunk
1940 1900 self.ui.progress(_('bundling'), None)
1941 1901
1942 1902 count[0] = 0
1943 1903 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1944 1904 yield chunk
1945 1905 self.ui.progress(_('bundling'), None)
1946 1906
1947 1907 count[0] = 0
1948 1908 for fname in sorted(changedfiles):
1949 1909 filerevlog = self.file(fname)
1950 1910 if not len(filerevlog):
1951 1911 raise util.Abort(_("empty or missing revlog for %s") % fname)
1952 1912 fstate[0] = fname
1953 1913 nodelist = gennodelst(filerevlog)
1954 1914 if nodelist:
1955 1915 count[0] += 1
1956 1916 yield bundler.fileheader(fname)
1957 1917 for chunk in filerevlog.group(nodelist, bundler, reorder):
1958 1918 yield chunk
1959 1919 yield bundler.close()
1960 1920 self.ui.progress(_('bundling'), None)
1961 1921
1962 1922 if nodes:
1963 1923 self.hook('outgoing', node=hex(nodes[0]), source=source)
1964 1924
1965 1925 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1966 1926
1967 1927 def addchangegroup(self, source, srctype, url, emptyok=False):
1968 1928 """Add the changegroup returned by source.read() to this repo.
1969 1929 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1970 1930 the URL of the repo where this changegroup is coming from.
1971 1931
1972 1932 Return an integer summarizing the change to this repo:
1973 1933 - nothing changed or no source: 0
1974 1934 - more heads than before: 1+added heads (2..n)
1975 1935 - fewer heads than before: -1-removed heads (-2..-n)
1976 1936 - number of heads stays the same: 1
1977 1937 """
1978 1938 def csmap(x):
1979 1939 self.ui.debug("add changeset %s\n" % short(x))
1980 1940 return len(cl)
1981 1941
1982 1942 def revmap(x):
1983 1943 return cl.rev(x)
1984 1944
1985 1945 if not source:
1986 1946 return 0
1987 1947
1988 1948 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1989 1949
1990 1950 changesets = files = revisions = 0
1991 1951 efiles = set()
1992 1952
1993 1953 # write changelog data to temp files so concurrent readers will not see
1994 1954 # inconsistent view
1995 1955 cl = self.changelog
1996 1956 cl.delayupdate()
1997 1957 oldheads = cl.heads()
1998 1958
1999 1959 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2000 1960 try:
2001 1961 trp = weakref.proxy(tr)
2002 1962 # pull off the changeset group
2003 1963 self.ui.status(_("adding changesets\n"))
2004 1964 clstart = len(cl)
2005 1965 class prog(object):
2006 1966 step = _('changesets')
2007 1967 count = 1
2008 1968 ui = self.ui
2009 1969 total = None
2010 1970 def __call__(self):
2011 1971 self.ui.progress(self.step, self.count, unit=_('chunks'),
2012 1972 total=self.total)
2013 1973 self.count += 1
2014 1974 pr = prog()
2015 1975 source.callback = pr
2016 1976
2017 1977 source.changelogheader()
2018 1978 srccontent = cl.addgroup(source, csmap, trp)
2019 1979 if not (srccontent or emptyok):
2020 1980 raise util.Abort(_("received changelog group is empty"))
2021 1981 clend = len(cl)
2022 1982 changesets = clend - clstart
2023 1983 for c in xrange(clstart, clend):
2024 1984 efiles.update(self[c].files())
2025 1985 efiles = len(efiles)
2026 1986 self.ui.progress(_('changesets'), None)
2027 1987
2028 1988 # pull off the manifest group
2029 1989 self.ui.status(_("adding manifests\n"))
2030 1990 pr.step = _('manifests')
2031 1991 pr.count = 1
2032 1992 pr.total = changesets # manifests <= changesets
2033 1993 # no need to check for empty manifest group here:
2034 1994 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2035 1995 # no new manifest will be created and the manifest group will
2036 1996 # be empty during the pull
2037 1997 source.manifestheader()
2038 1998 self.manifest.addgroup(source, revmap, trp)
2039 1999 self.ui.progress(_('manifests'), None)
2040 2000
2041 2001 needfiles = {}
2042 2002 if self.ui.configbool('server', 'validate', default=False):
2043 2003 # validate incoming csets have their manifests
2044 2004 for cset in xrange(clstart, clend):
2045 2005 mfest = self.changelog.read(self.changelog.node(cset))[0]
2046 2006 mfest = self.manifest.readdelta(mfest)
2047 2007 # store file nodes we must see
2048 2008 for f, n in mfest.iteritems():
2049 2009 needfiles.setdefault(f, set()).add(n)
2050 2010
2051 2011 # process the files
2052 2012 self.ui.status(_("adding file changes\n"))
2053 2013 pr.step = _('files')
2054 2014 pr.count = 1
2055 2015 pr.total = efiles
2056 2016 source.callback = None
2057 2017
2058 2018 while True:
2059 2019 chunkdata = source.filelogheader()
2060 2020 if not chunkdata:
2061 2021 break
2062 2022 f = chunkdata["filename"]
2063 2023 self.ui.debug("adding %s revisions\n" % f)
2064 2024 pr()
2065 2025 fl = self.file(f)
2066 2026 o = len(fl)
2067 2027 if not fl.addgroup(source, revmap, trp):
2068 2028 raise util.Abort(_("received file revlog group is empty"))
2069 2029 revisions += len(fl) - o
2070 2030 files += 1
2071 2031 if f in needfiles:
2072 2032 needs = needfiles[f]
2073 2033 for new in xrange(o, len(fl)):
2074 2034 n = fl.node(new)
2075 2035 if n in needs:
2076 2036 needs.remove(n)
2077 2037 if not needs:
2078 2038 del needfiles[f]
2079 2039 self.ui.progress(_('files'), None)
2080 2040
2081 2041 for f, needs in needfiles.iteritems():
2082 2042 fl = self.file(f)
2083 2043 for n in needs:
2084 2044 try:
2085 2045 fl.rev(n)
2086 2046 except error.LookupError:
2087 2047 raise util.Abort(
2088 2048 _('missing file data for %s:%s - run hg verify') %
2089 2049 (f, hex(n)))
2090 2050
2091 2051 dh = 0
2092 2052 if oldheads:
2093 2053 heads = cl.heads()
2094 2054 dh = len(heads) - len(oldheads)
2095 2055 for h in heads:
2096 2056 if h not in oldheads and 'close' in self[h].extra():
2097 2057 dh -= 1
2098 2058 htext = ""
2099 2059 if dh:
2100 2060 htext = _(" (%+d heads)") % dh
2101 2061
2102 2062 self.ui.status(_("added %d changesets"
2103 2063 " with %d changes to %d files%s\n")
2104 2064 % (changesets, revisions, files, htext))
2105 2065
2106 2066 if changesets > 0:
2107 2067 p = lambda: cl.writepending() and self.root or ""
2108 2068 self.hook('pretxnchangegroup', throw=True,
2109 2069 node=hex(cl.node(clstart)), source=srctype,
2110 2070 url=url, pending=p)
2111 2071
2112 2072 added = [cl.node(r) for r in xrange(clstart, clend)]
2113 2073 publishing = self.ui.configbool('phases', 'publish', True)
2114 2074 if srctype == 'push':
2115 2075 # Old servers can not push the boundary themselves.
2116 2076 # New servers won't push the boundary if the changeset already
2117 2077 # existed locally as secret
2118 2078 #
2119 2079 # We should not use added here but the list of all changes in
2120 2080 # the bundle
2121 2081 if publishing:
2122 2082 phases.advanceboundary(self, phases.public, srccontent)
2123 2083 else:
2124 2084 phases.advanceboundary(self, phases.draft, srccontent)
2125 2085 phases.retractboundary(self, phases.draft, added)
2126 2086 elif srctype != 'strip':
2127 2087 # publishing only alters behavior during push
2128 2088 #
2129 2089 # strip should not touch boundary at all
2130 2090 phases.retractboundary(self, phases.draft, added)
2131 2091
2132 2092 # make changelog see real files again
2133 2093 cl.finalize(trp)
2134 2094
2135 2095 tr.close()
2136 2096
2137 2097 if changesets > 0:
2138 2098 def runhooks():
2139 2099 # forcefully update the on-disk branch cache
2140 2100 self.ui.debug("updating the branch cache\n")
2141 2101 self.updatebranchcache()
2142 2102 self.hook("changegroup", node=hex(cl.node(clstart)),
2143 2103 source=srctype, url=url)
2144 2104
2145 2105 for n in added:
2146 2106 self.hook("incoming", node=hex(n), source=srctype,
2147 2107 url=url)
2148 2108 self._afterlock(runhooks)
2149 2109
2150 2110 finally:
2151 2111 tr.release()
2152 2112 # never return 0 here:
2153 2113 if dh < 0:
2154 2114 return dh - 1
2155 2115 else:
2156 2116 return dh + 1
2157 2117
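The return-value convention of addchangegroup() is easy to misread, so here it is spelled out, derived from the docstring and the final dh computation above:

    # dh = head count after - head count before (closed heads excluded)
    #
    #   dh == 0  ->  returns 1        (head count unchanged)
    #   dh >  0  ->  returns dh + 1   (2..n: dh new heads)
    #   dh <  0  ->  returns dh - 1   (-2..-n: -dh heads removed)
    #
    # 0 is reserved for "nothing changed / no source", so a successful
    # add never returns 0.
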
2158 2118 def stream_in(self, remote, requirements):
2159 2119 lock = self.lock()
2160 2120 try:
2161 2121 fp = remote.stream_out()
2162 2122 l = fp.readline()
2163 2123 try:
2164 2124 resp = int(l)
2165 2125 except ValueError:
2166 2126 raise error.ResponseError(
2167 2127 _('Unexpected response from remote server:'), l)
2168 2128 if resp == 1:
2169 2129 raise util.Abort(_('operation forbidden by server'))
2170 2130 elif resp == 2:
2171 2131 raise util.Abort(_('locking the remote repository failed'))
2172 2132 elif resp != 0:
2173 2133 raise util.Abort(_('the server sent an unknown error code'))
2174 2134 self.ui.status(_('streaming all changes\n'))
2175 2135 l = fp.readline()
2176 2136 try:
2177 2137 total_files, total_bytes = map(int, l.split(' ', 1))
2178 2138 except (ValueError, TypeError):
2179 2139 raise error.ResponseError(
2180 2140 _('Unexpected response from remote server:'), l)
2181 2141 self.ui.status(_('%d files to transfer, %s of data\n') %
2182 2142 (total_files, util.bytecount(total_bytes)))
2183 2143 start = time.time()
2184 2144 for i in xrange(total_files):
2185 2145 # XXX doesn't support '\n' or '\r' in filenames
2186 2146 l = fp.readline()
2187 2147 try:
2188 2148 name, size = l.split('\0', 1)
2189 2149 size = int(size)
2190 2150 except (ValueError, TypeError):
2191 2151 raise error.ResponseError(
2192 2152 _('Unexpected response from remote server:'), l)
2193 2153 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2194 2154 # for backwards compat, name was partially encoded
2195 2155 ofp = self.sopener(store.decodedir(name), 'w')
2196 2156 for chunk in util.filechunkiter(fp, limit=size):
2197 2157 ofp.write(chunk)
2198 2158 ofp.close()
2199 2159 elapsed = time.time() - start
2200 2160 if elapsed <= 0:
2201 2161 elapsed = 0.001
2202 2162 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2203 2163 (util.bytecount(total_bytes), elapsed,
2204 2164 util.bytecount(total_bytes / elapsed)))
2205 2165
2206 2166 # new requirements = old non-format requirements + new format-related
2207 2167 # requirements from the streamed-in repository
2208 2168 requirements.update(set(self.requirements) - self.supportedformats)
2209 2169 self._applyrequirements(requirements)
2210 2170 self._writerequirements()
2211 2171
2212 2172 self.invalidate()
2213 2173 return len(self.heads()) + 1
2214 2174 finally:
2215 2175 lock.release()
2216 2176
2217 2177 def clone(self, remote, heads=[], stream=False):
2218 2178 '''clone remote repository.
2219 2179
2220 2180 keyword arguments:
2221 2181 heads: list of revs to clone (forces use of pull)
2222 2182 stream: use streaming clone if possible'''
2223 2183
2224 2184 # now, all clients that can request uncompressed clones can
2225 2185 # read repo formats supported by all servers that can serve
2226 2186 # them.
2227 2187
2228 2188 # if revlog format changes, client will have to check version
2229 2189 # and format flags on "stream" capability, and use
2230 2190 # uncompressed only if compatible.
2231 2191
2232 2192 if stream and not heads:
2233 2193 # 'stream' means remote revlog format is revlogv1 only
2234 2194 if remote.capable('stream'):
2235 2195 return self.stream_in(remote, set(('revlogv1',)))
2236 2196 # otherwise, 'streamreqs' contains the remote revlog format
2237 2197 streamreqs = remote.capable('streamreqs')
2238 2198 if streamreqs:
2239 2199 streamreqs = set(streamreqs.split(','))
2240 2200 # if we support it, stream in and adjust our requirements
2241 2201 if not streamreqs - self.supportedformats:
2242 2202 return self.stream_in(remote, streamreqs)
2243 2203 return self.pull(remote, heads)
2244 2204
2245 2205 def pushkey(self, namespace, key, old, new):
2246 2206 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2247 2207 old=old, new=new)
2248 2208 ret = pushkey.push(self, namespace, key, old, new)
2249 2209 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2250 2210 ret=ret)
2251 2211 return ret
2252 2212
2253 2213 def listkeys(self, namespace):
2254 2214 self.hook('prelistkeys', throw=True, namespace=namespace)
2255 2215 values = pushkey.list(self, namespace)
2256 2216 self.hook('listkeys', namespace=namespace, values=values)
2257 2217 return values
2258 2218
2259 2219 def debugwireargs(self, one, two, three=None, four=None, five=None):
2260 2220 '''used to test argument passing over the wire'''
2261 2221 return "%s %s %s %s %s" % (one, two, three, four, five)
2262 2222
2263 2223 def savecommitmessage(self, text):
2264 2224 fp = self.opener('last-message.txt', 'wb')
2265 2225 try:
2266 2226 fp.write(text)
2267 2227 finally:
2268 2228 fp.close()
2269 2229 return self.pathto(fp.name[len(self.root)+1:])
2270 2230
2271 2231 # used to avoid circular references so destructors work
2272 2232 def aftertrans(files):
2273 2233 renamefiles = [tuple(t) for t in files]
2274 2234 def a():
2275 2235 for src, dest in renamefiles:
2276 2236 util.rename(src, dest)
2277 2237 return a
2278 2238
2279 2239 def undoname(fn):
2280 2240 base, name = os.path.split(fn)
2281 2241 assert name.startswith('journal')
2282 2242 return os.path.join(base, name.replace('journal', 'undo', 1))
2283 2243
2284 2244 def instance(ui, path, create):
2285 2245 return localrepository(ui, util.urllocalpath(path), create)
2286 2246
2287 2247 def islocal(path):
2288 2248 return True
@@ -1,283 +1,290 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms of the
10 10 GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase are described below;
22 22 here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not permanent and
25 25 leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered, so they
28 28 can be considered from lowest to highest. The default, lowest phase is 'public'
29 29 - this is the normal phase of existing changesets. A child changeset can not be
30 30 in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 local commits are draft by default
40 40
41 41 Phase movement and exchange
42 42 ============================
43 43
44 44 Phase data are exchanged by pushkey on pull and push. Some servers have a
45 45 publish option set; we call them publishing servers. Pushing to such a server
46 46 makes draft changesets public.
47 47
48 48 A small list of facts/rules defines the exchange of phases:
49 49
50 50 * old client never changes server states
51 51 * pull never changes server states
52 52 * csets on publishing and old servers are seen as public by the client
53 53
54 54 * Any secret changeset seen in another repository is lowered to at least draft
55 55
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase exchange:
58 58
59 59 server
60 60 old publish non-publish
61 61 N X N D P N D P
62 62 old client
63 63 pull
64 64 N - X/X - X/D X/P - X/D X/P
65 65 X - X/X - X/D X/P - X/D X/P
66 66 push
67 67 X X/X X/X X/P X/P X/P X/D X/D X/P
68 68 new client
69 69 pull
70 70 N - P/X - P/D P/P - D/D P/P
71 71 D - P/X - P/D P/P - D/D P/P
72 72 P - P/X - P/D P/P - P/D P/P
73 73 push
74 74 D P/X P/X P/P P/P P/P D/D D/D P/P
75 75 P P/X P/X P/P P/P P/P P/P P/P P/P
76 76
77 77 Legend:
78 78
79 79 A/B = final state on client / state on server
80 80
81 81 * N = new/not present,
82 82 * P = public,
83 83 * D = draft,
84 84 * X = not tracked (ie: the old client or server has no internal way of
85 85 recording the phase.)
86 86
87 87 passive = only pushes
88 88
89 89
90 90 A cell here can be read like this:
91 91
92 92 "When a new client pushes a draft changeset (D) to a publishing server
93 93 where it's not present (N), it's marked public on both sides (P/P)."
94 94
95 95 Note: an old client behaves as a publishing server with draft-only content
96 96 - other people see it as public
97 97 - content is pushed as draft
98 98
99 99 """
100 100
101 101 import errno
102 102 from node import nullid, bin, hex, short
103 103 from i18n import _
104 104
105 105 allphases = public, draft, secret = range(3)
106 106 trackedphases = allphases[1:]
107 107 phasenames = ['public', 'draft', 'secret']
108 108
109 109 def readroots(repo):
110 110 """Read phase roots from disk"""
111 111 roots = [set() for i in allphases]
112 112 roots[0].add(nullid)
113 113 try:
114 114 f = repo.sopener('phaseroots')
115 115 try:
116 116 for line in f:
117 117 phase, nh = line.strip().split()
118 118 roots[int(phase)].add(bin(nh))
119 119 finally:
120 120 f.close()
121 121 except IOError, inst:
122 122 if inst.errno != errno.ENOENT:
123 123 raise
124 124 return roots
125 125
126 126 def writeroots(repo):
127 127 """Write phase roots from disk"""
128 128 f = repo.sopener('phaseroots', 'w', atomictemp=True)
129 129 try:
130 130 for phase, roots in enumerate(repo._phaseroots):
131 131 for h in roots:
132 132 f.write('%i %s\n' % (phase, hex(h)))
133 133 repo._dirtyphases = False
134 134 finally:
135 135 f.close()
136 136
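For reference, the on-disk format produced by writeroots() above is one '<phase> <hexnode>' line per root; a repository with a single draft root would store something like (nodeid borrowed from the test below, purely as an example):

    1 ba677d0156c1196c1a699fa53f390dcfc3ce3872
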
137 137 def filterunknown(repo, phaseroots=None):
138 138 """remove unknown nodes from the phase boundary
139 139
140 140 no data is lost, as unknown nodes only hold data for their descendants
141 141 """
142 142 if phaseroots is None:
143 143 phaseroots = repo._phaseroots
144 144 for phase, nodes in enumerate(phaseroots):
145 145 missing = [node for node in nodes if node not in repo]
146 146 if missing:
147 147 for mnode in missing:
148 148 msg = _('Removing unknown node %(n)s from %(p)i-phase boundary\n')
149 149 repo.ui.debug(msg % {'n': short(mnode), 'p': phase})
150 150 nodes.symmetric_difference_update(missing)
151 151 repo._dirtyphases = True
152 152
153 153 def advanceboundary(repo, targetphase, nodes):
154 154 """Add nodes to a phase changing other nodes phases if necessary.
155 155
156 156 This function move boundary *forward* this means that all nodes are set
157 157 in the target phase or kept in a *lower* phase.
158 158
159 159 Simplify boundary to contains phase roots only."""
160 160 delroots = [] # set of root deleted by this path
161 161 for phase in xrange(targetphase + 1, len(allphases)):
162 162 # filter nodes that are not in a compatible phase already
163 163 # XXX rev phase cache might have been invalidated by a previous loop
164 164 # XXX we need to be smarter here
165 165 nodes = [n for n in nodes if repo[n].phase() >= phase]
166 166 if not nodes:
167 167 break # no roots to move anymore
168 168 roots = repo._phaseroots[phase]
169 169 olds = roots.copy()
170 170 ctxs = list(repo.set('roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
171 171 roots.clear()
172 172 roots.update(ctx.node() for ctx in ctxs)
173 173 if olds != roots:
174 174 # invalidate cache (we could probably be smarter here)
175 175 if '_phaserev' in vars(repo):
176 176 del repo._phaserev
177 177 repo._dirtyphases = True
178 178 # some roots may need to be declared for lower phases
179 179 delroots.extend(olds - roots)
180 180 # declare deleted root in the target phase
181 181 if targetphase != 0:
182 182 retractboundary(repo, targetphase, delroots)
183 183
184 184
185 185 def retractboundary(repo, targetphase, nodes):
186 186 """Set nodes back to a phase changing other nodes phases if necessary.
187 187
188 188 This function move boundary *backward* this means that all nodes are set
189 189 in the target phase or kept in a *higher* phase.
190 190
191 191 Simplify boundary to contains phase roots only."""
192 192 currentroots = repo._phaseroots[targetphase]
193 193 newroots = [n for n in nodes if repo[n].phase() < targetphase]
194 194 if newroots:
195 195 currentroots.update(newroots)
196 196 ctxs = repo.set('roots(%ln::)', currentroots)
197 197 currentroots.intersection_update(ctx.node() for ctx in ctxs)
198 198 if '_phaserev' in vars(repo):
199 199 del repo._phaserev
200 200 repo._dirtyphases = True
201 201
202 202
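The two boundary moves above are deliberately asymmetric. A toy contrast on a changeset x, using the module's phase constants (a sketch only):

    # forward move: phases only ever get lower (toward public)
    advanceboundary(repo, draft, [x])    # a secret x becomes draft;
                                         # a public x stays public

    # backward move: phases only ever get higher (away from public)
    retractboundary(repo, secret, [x])   # a draft or public x becomes
                                         # secret; a secret x is untouched
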
203 203 def listphases(repo):
204 204 """List phases root for serialisation over pushkey"""
205 205 keys = {}
206 for phase in trackedphases:
207 for root in repo._phaseroots[phase]:
208 keys[hex(root)] = '%i' % phase
206 value = '%i' % draft
207 for root in repo._phaseroots[draft]:
208 keys[hex(root)] = value
209
209 210 if repo.ui.configbool('phases', 'publish', True):
210 211 # Add an extra entry to let the remote know we are a publishing repo.
211 212 # A publishing repo can't just pretend it is an old repo. When pushing to
212 213 # a publishing repo, the client still needs to push phase boundaries
213 214 #
214 215 # Push does not only push changesets. It also pushes phase data. New
215 216 # phase data may apply to common changesets which won't be pushed (as they
216 217 # are common). Here is a very simple example:
217 218 #
218 219 # 1) repo A pushes changeset X as draft to repo B
219 220 # 2) repo B makes changeset X public
220 221 # 3) repo B pushes to repo A. X is not pushed, but the data that X is now
221 222 # public should be
222 223 #
223 224 # The server can't handle it on its own as it has no idea of the client's
224 225 # phase data.
225 226 keys['publishing'] = 'True'
226 227 return keys
227 228
228 229 def pushphase(repo, nhex, oldphasestr, newphasestr):
229 230 """List phases root for serialisation over pushkey"""
230 231 lock = repo.lock()
231 232 try:
232 233 currentphase = repo[nhex].phase()
233 234 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
234 235 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
235 236 if currentphase == oldphase and newphase < oldphase:
236 237 advanceboundary(repo, newphase, [bin(nhex)])
237 238 return 1
238 239 else:
239 240 return 0
240 241 finally:
241 242 lock.release()
242 243
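Seen from the server side, a 'phases' pushkey carries (hex node, old phase, new phase) as strings, and pushphase() only applies forward moves whose precondition holds. Assuming nhex names a changeset the repo currently considers draft:

    pushphase(repo, nhex, '1', '0')  # draft -> public: phases match and
                                     # the move is forward, returns 1
    pushphase(repo, nhex, '0', '1')  # would move backward (new > old),
                                     # so nothing happens, returns 0
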
243 244 def visibleheads(repo):
244 245 """return the set of visible head of this repo"""
245 246 # XXX we want a cache on this
246 247 sroots = repo._phaseroots[secret]
247 248 if sroots:
248 249 # XXX very slow revset. storing heads or secret "boundary" would help.
249 250 revset = repo.set('heads(not (%ln::))', sroots)
250 251
251 252 vheads = [ctx.node() for ctx in revset]
252 253 if not vheads:
253 254 vheads.append(nullid)
254 255 else:
255 256 vheads = repo.heads()
256 257 return vheads
257 258
258 259 def analyzeremotephases(repo, subset, roots):
259 260 """Compute phases heads and root in a subset of node from root dict
260 261
261 262 * subset is heads of the subset
262 263 * roots is {<nodeid> => phase} mapping. key and value are string.
263 264
264 265 Accept unknown element input
265 266 """
266 267 # build list from dictionary
267 phaseroots = [[] for p in allphases]
268 draftroots = []
269 nm = repo.changelog.nodemap # to filter unknown node
268 270 for nhex, phase in roots.iteritems():
269 271 if nhex == 'publishing': # ignore data related to publish option
270 272 continue
271 273 node = bin(nhex)
272 274 phase = int(phase)
273 if node in repo:
274 phaseroots[phase].append(node)
275 if phase == 0:
276 if node != nullid:
277 msg = _('ignoring inconsistent public root from remote: %s\n')
278 repo.ui.warn(msg % nhex)
279 elif phase == 1:
280 if node in nm:
281 draftroots.append(node)
282 else:
283 msg = _('ignoring unexpected root from remote: %i %s\n')
284 repo.ui.warn(msg % (phase, nhex))
275 285 # compute heads
276 phaseheads = [[] for p in allphases]
277 for phase in allphases[:-1]:
278 toproof = phaseroots[phase + 1]
279 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
280 subset, toproof, toproof, subset)
281 phaseheads[phase].extend(c.node() for c in revset)
282 return phaseheads, phaseroots
286 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
287 subset, draftroots, draftroots, subset)
288 publicheads = [c.node() for c in revset]
289 return publicheads, draftroots
283 290
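To make the new return shape concrete: fed the pushkey dictionary of a non-publishing remote, analyzeremotephases() yields a (publicheads, draftroots) pair, for example (nodeid and variable names illustrative):

    roots = {'ba677d0156c1196c1a699fa53f390dcfc3ce3872': '1'}
    pheads, droots = analyzeremotephases(repo, subset, roots)
    # droots: the known draft roots, as binary nodeids
    # pheads: heads of the part of subset not descending from droots,
    #         i.e. the changesets the remote considers public
    # unknown nodes and the 'publishing' entry are filtered out
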
@@ -1,121 +1,121 b''
1 1 $ "$TESTDIR/hghave" serve || exit 80
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo a > a
6 6 $ hg ci -Ama
7 7 adding a
8 8 $ cd ..
9 9 $ hg clone test test2
10 10 updating to branch default
11 11 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
12 12 $ cd test2
13 13 $ echo a >> a
14 14 $ hg ci -mb
15 15 $ req() {
16 16 > hg serve -p $HGPORT -d --pid-file=hg.pid -E errors.log
17 17 > cat hg.pid >> $DAEMON_PIDS
18 18 > hg --cwd ../test2 push http://localhost:$HGPORT/
19 19 > "$TESTDIR/killdaemons.py"
20 20 > echo % serve errors
21 21 > cat errors.log
22 22 > }
23 23 $ cd ../test
24 24
25 25 expect ssl error
26 26
27 27 $ req
28 28 pushing to http://localhost:$HGPORT/
29 29 searching for changes
30 30 remote: ssl required
31 31 remote: ssl required
32 updating phase of ba677d0156c1 to 0 from 1 failed!
32 updating ba677d0156c1 to public failed!
33 33 % serve errors
34 34
35 35 expect authorization error
36 36
37 37 $ echo '[web]' > .hg/hgrc
38 38 $ echo 'push_ssl = false' >> .hg/hgrc
39 39 $ req
40 40 pushing to http://localhost:$HGPORT/
41 41 searching for changes
42 42 abort: authorization failed
43 43 % serve errors
44 44
45 45 expect authorization error: must have authorized user
46 46
47 47 $ echo 'allow_push = unperson' >> .hg/hgrc
48 48 $ req
49 49 pushing to http://localhost:$HGPORT/
50 50 searching for changes
51 51 abort: authorization failed
52 52 % serve errors
53 53
54 54 expect success
55 55
56 56 $ echo 'allow_push = *' >> .hg/hgrc
57 57 $ echo '[hooks]' >> .hg/hgrc
58 58 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup 0' >> .hg/hgrc
59 59 $ req
60 60 pushing to http://localhost:$HGPORT/
61 61 searching for changes
62 62 remote: adding changesets
63 63 remote: adding manifests
64 64 remote: adding file changes
65 65 remote: added 1 changesets with 1 changes to 1 files
66 66 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
67 67 % serve errors
68 68 $ hg rollback
69 69 repository tip rolled back to revision 0 (undo serve)
70 70
71 71 expect success, server lacks the httpheader capability
72 72
73 73 $ CAP=httpheader
74 74 $ . "$TESTDIR/notcapable"
75 75 $ req
76 76 pushing to http://localhost:$HGPORT/
77 77 searching for changes
78 78 remote: adding changesets
79 79 remote: adding manifests
80 80 remote: adding file changes
81 81 remote: added 1 changesets with 1 changes to 1 files
82 82 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
83 83 % serve errors
84 84 $ hg rollback
85 85 repository tip rolled back to revision 0 (undo serve)
86 86
87 87 expect success, server lacks the unbundlehash capability
88 88
89 89 $ CAP=unbundlehash
90 90 $ . "$TESTDIR/notcapable"
91 91 $ req
92 92 pushing to http://localhost:$HGPORT/
93 93 searching for changes
94 94 remote: adding changesets
95 95 remote: adding manifests
96 96 remote: adding file changes
97 97 remote: added 1 changesets with 1 changes to 1 files
98 98 remote: changegroup hook: HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_URL=remote:http:*: (glob)
99 99 % serve errors
100 100 $ hg rollback
101 101 repository tip rolled back to revision 0 (undo serve)
102 102
103 103 expect authorization error: all users denied
104 104
105 105 $ echo '[web]' > .hg/hgrc
106 106 $ echo 'push_ssl = false' >> .hg/hgrc
107 107 $ echo 'deny_push = *' >> .hg/hgrc
108 108 $ req
109 109 pushing to http://localhost:$HGPORT/
110 110 searching for changes
111 111 abort: authorization failed
112 112 % serve errors
113 113
114 114 expect authorization error: some users denied, users must be authenticated
115 115
116 116 $ echo 'deny_push = unperson' >> .hg/hgrc
117 117 $ req
118 118 pushing to http://localhost:$HGPORT/
119 119 searching for changes
120 120 abort: authorization failed
121 121 % serve errors