obsolete: move obsolete markers read/write logic to obsstore object...
Pierre-Yves David
r17124:f1b7683f default
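This revision removes the hand-rolled obsolescence-marker read/write code from localrepo.py and makes the obsstore object responsible for its own persistence: the cached obsstore property now just hands the store opener to obsolete.obsstore(), and the lock-release callback asks the store to flush itself. Condensed, the two call sites touched by the diff below become:

    @storecache('obsstore')
    def obsstore(self):
        # reading any existing 'obsstore' file is now the object's own job
        return obsolete.obsstore(self.sopener)

    def unlock():
        self.store.write()
        if '_phasecache' in vars(self):
            self._phasecache.write()
        if 'obsstore' in vars(self):
            # the atomictemp rewrite and the "any new markers?" check move
            # into obsstore.flushmarkers()
            self.obsstore.flushmarkers()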
@@ -1,2453 +1,2442
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import repo, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 class localrepository(repo.repository):
27 27 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
28 28 'known', 'getbundle'))
29 29 supportedformats = set(('revlogv1', 'generaldelta'))
30 30 supported = supportedformats | set(('store', 'fncache', 'shared',
31 31 'dotencode'))
32 32
33 33 def __init__(self, baseui, path=None, create=False):
34 34 repo.repository.__init__(self)
35 35 self.root = os.path.realpath(util.expandpath(path))
36 36 self.path = os.path.join(self.root, ".hg")
37 37 self.origroot = path
38 38 self.auditor = scmutil.pathauditor(self.root, self._checknested)
39 39 self.opener = scmutil.opener(self.path)
40 40 self.wopener = scmutil.opener(self.root)
41 41 self.baseui = baseui
42 42 self.ui = baseui.copy()
43 43 # A list of callbacks to shape the phase if no data were found.
44 44 # Callbacks are in the form: func(repo, roots) --> processed root.
45 45 # This list is to be filled by extensions during repo setup
46 46 self._phasedefaults = []
47 47
48 48 try:
49 49 self.ui.readconfig(self.join("hgrc"), self.root)
50 50 extensions.loadall(self.ui)
51 51 except IOError:
52 52 pass
53 53
54 54 if not os.path.isdir(self.path):
55 55 if create:
56 56 if not os.path.exists(path):
57 57 util.makedirs(path)
58 58 util.makedir(self.path, notindexed=True)
59 59 requirements = ["revlogv1"]
60 60 if self.ui.configbool('format', 'usestore', True):
61 61 os.mkdir(os.path.join(self.path, "store"))
62 62 requirements.append("store")
63 63 if self.ui.configbool('format', 'usefncache', True):
64 64 requirements.append("fncache")
65 65 if self.ui.configbool('format', 'dotencode', True):
66 66 requirements.append('dotencode')
67 67 # create an invalid changelog
68 68 self.opener.append(
69 69 "00changelog.i",
70 70 '\0\0\0\2' # represents revlogv2
71 71 ' dummy changelog to prevent using the old repo layout'
72 72 )
73 73 if self.ui.configbool('format', 'generaldelta', False):
74 74 requirements.append("generaldelta")
75 75 requirements = set(requirements)
76 76 else:
77 77 raise error.RepoError(_("repository %s not found") % path)
78 78 elif create:
79 79 raise error.RepoError(_("repository %s already exists") % path)
80 80 else:
81 81 try:
82 82 requirements = scmutil.readrequires(self.opener, self.supported)
83 83 except IOError, inst:
84 84 if inst.errno != errno.ENOENT:
85 85 raise
86 86 requirements = set()
87 87
88 88 self.sharedpath = self.path
89 89 try:
90 90 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
91 91 if not os.path.exists(s):
92 92 raise error.RepoError(
93 93 _('.hg/sharedpath points to nonexistent directory %s') % s)
94 94 self.sharedpath = s
95 95 except IOError, inst:
96 96 if inst.errno != errno.ENOENT:
97 97 raise
98 98
99 99 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
100 100 self.spath = self.store.path
101 101 self.sopener = self.store.opener
102 102 self.sjoin = self.store.join
103 103 self.opener.createmode = self.store.createmode
104 104 self._applyrequirements(requirements)
105 105 if create:
106 106 self._writerequirements()
107 107
108 108
109 109 self._branchcache = None
110 110 self._branchcachetip = None
111 111 self.filterpats = {}
112 112 self._datafilters = {}
113 113 self._transref = self._lockref = self._wlockref = None
114 114
115 115 # A cache for various files under .hg/ that tracks file changes,
116 116 # (used by the filecache decorator)
117 117 #
118 118 # Maps a property name to its util.filecacheentry
119 119 self._filecache = {}
120 120
121 121 def _applyrequirements(self, requirements):
122 122 self.requirements = requirements
123 123 openerreqs = set(('revlogv1', 'generaldelta'))
124 124 self.sopener.options = dict((r, 1) for r in requirements
125 125 if r in openerreqs)
126 126
127 127 def _writerequirements(self):
128 128 reqfile = self.opener("requires", "w")
129 129 for r in self.requirements:
130 130 reqfile.write("%s\n" % r)
131 131 reqfile.close()
132 132
133 133 def _checknested(self, path):
134 134 """Determine if path is a legal nested repository."""
135 135 if not path.startswith(self.root):
136 136 return False
137 137 subpath = path[len(self.root) + 1:]
138 138 normsubpath = util.pconvert(subpath)
139 139
140 140 # XXX: Checking against the current working copy is wrong in
141 141 # the sense that it can reject things like
142 142 #
143 143 # $ hg cat -r 10 sub/x.txt
144 144 #
145 145 # if sub/ is no longer a subrepository in the working copy
146 146 # parent revision.
147 147 #
148 148 # However, it can of course also allow things that would have
149 149 # been rejected before, such as the above cat command if sub/
150 150 # is a subrepository now, but was a normal directory before.
151 151 # The old path auditor would have rejected by mistake since it
152 152 # panics when it sees sub/.hg/.
153 153 #
154 154 # All in all, checking against the working copy seems sensible
155 155 # since we want to prevent access to nested repositories on
156 156 # the filesystem *now*.
157 157 ctx = self[None]
158 158 parts = util.splitpath(subpath)
159 159 while parts:
160 160 prefix = '/'.join(parts)
161 161 if prefix in ctx.substate:
162 162 if prefix == normsubpath:
163 163 return True
164 164 else:
165 165 sub = ctx.sub(prefix)
166 166 return sub.checknested(subpath[len(prefix) + 1:])
167 167 else:
168 168 parts.pop()
169 169 return False
170 170
171 171 @filecache('bookmarks')
172 172 def _bookmarks(self):
173 173 return bookmarks.read(self)
174 174
175 175 @filecache('bookmarks.current')
176 176 def _bookmarkcurrent(self):
177 177 return bookmarks.readcurrent(self)
178 178
179 179 def _writebookmarks(self, marks):
180 180 bookmarks.write(self)
181 181
182 182 def bookmarkheads(self, bookmark):
183 183 name = bookmark.split('@', 1)[0]
184 184 heads = []
185 185 for mark, n in self._bookmarks.iteritems():
186 186 if mark.split('@', 1)[0] == name:
187 187 heads.append(n)
188 188 return heads
189 189
190 190 @storecache('phaseroots')
191 191 def _phasecache(self):
192 192 return phases.phasecache(self, self._phasedefaults)
193 193
194 194 @storecache('obsstore')
195 195 def obsstore(self):
196 store = obsolete.obsstore()
197 data = self.sopener.tryread('obsstore')
198 if data:
199 store.loadmarkers(data)
196 store = obsolete.obsstore(self.sopener)
200 197 return store
201 198
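The removed lines above read the raw 'obsstore' file and fed it to loadmarkers(); after this change that happens when the object is constructed from the store opener. A minimal sketch of the constructor side, assuming obsolete.obsstore keeps a loadmarkers-style parser (obsolete.py itself is not part of this diff, so the details below are illustrative only):

    class obsstore(object):
        """Sketch: an obsstore that reads itself from the store opener."""
        def __init__(self, sopener):
            self.sopener = sopener
            self._all = []   # markers already known
            self._new = []   # markers added since the last flush
            # mirror the logic removed from localrepo.obsstore above
            data = sopener.tryread('obsstore')
            if data:
                self.loadmarkers(data)

        def loadmarkers(self, data):
            # stand-in decoder; the real format is binary and lives in
            # mercurial/obsolete.py
            self._all.extend(data.splitlines())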
202 199 @storecache('00changelog.i')
203 200 def changelog(self):
204 201 c = changelog.changelog(self.sopener)
205 202 if 'HG_PENDING' in os.environ:
206 203 p = os.environ['HG_PENDING']
207 204 if p.startswith(self.root):
208 205 c.readpending('00changelog.i.a')
209 206 return c
210 207
211 208 @storecache('00manifest.i')
212 209 def manifest(self):
213 210 return manifest.manifest(self.sopener)
214 211
215 212 @filecache('dirstate')
216 213 def dirstate(self):
217 214 warned = [0]
218 215 def validate(node):
219 216 try:
220 217 self.changelog.rev(node)
221 218 return node
222 219 except error.LookupError:
223 220 if not warned[0]:
224 221 warned[0] = True
225 222 self.ui.warn(_("warning: ignoring unknown"
226 223 " working parent %s!\n") % short(node))
227 224 return nullid
228 225
229 226 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
230 227
231 228 def __getitem__(self, changeid):
232 229 if changeid is None:
233 230 return context.workingctx(self)
234 231 return context.changectx(self, changeid)
235 232
236 233 def __contains__(self, changeid):
237 234 try:
238 235 return bool(self.lookup(changeid))
239 236 except error.RepoLookupError:
240 237 return False
241 238
242 239 def __nonzero__(self):
243 240 return True
244 241
245 242 def __len__(self):
246 243 return len(self.changelog)
247 244
248 245 def __iter__(self):
249 246 for i in xrange(len(self)):
250 247 yield i
251 248
252 249 def revs(self, expr, *args):
253 250 '''Return a list of revisions matching the given revset'''
254 251 expr = revset.formatspec(expr, *args)
255 252 m = revset.match(None, expr)
256 253 return [r for r in m(self, range(len(self)))]
257 254
258 255 def set(self, expr, *args):
259 256 '''
260 257 Yield a context for each matching revision, after doing arg
261 258 replacement via revset.formatspec
262 259 '''
263 260 for r in self.revs(expr, *args):
264 261 yield self[r]
265 262
266 263 def url(self):
267 264 return 'file:' + self.root
268 265
269 266 def hook(self, name, throw=False, **args):
270 267 return hook.hook(self.ui, self, name, throw, **args)
271 268
272 269 tag_disallowed = ':\r\n'
273 270
274 271 def _tag(self, names, node, message, local, user, date, extra={}):
275 272 if isinstance(names, str):
276 273 allchars = names
277 274 names = (names,)
278 275 else:
279 276 allchars = ''.join(names)
280 277 for c in self.tag_disallowed:
281 278 if c in allchars:
282 279 raise util.Abort(_('%r cannot be used in a tag name') % c)
283 280
284 281 branches = self.branchmap()
285 282 for name in names:
286 283 self.hook('pretag', throw=True, node=hex(node), tag=name,
287 284 local=local)
288 285 if name in branches:
289 286 self.ui.warn(_("warning: tag %s conflicts with existing"
290 287 " branch name\n") % name)
291 288
292 289 def writetags(fp, names, munge, prevtags):
293 290 fp.seek(0, 2)
294 291 if prevtags and prevtags[-1] != '\n':
295 292 fp.write('\n')
296 293 for name in names:
297 294 m = munge and munge(name) or name
298 295 if (self._tagscache.tagtypes and
299 296 name in self._tagscache.tagtypes):
300 297 old = self.tags().get(name, nullid)
301 298 fp.write('%s %s\n' % (hex(old), m))
302 299 fp.write('%s %s\n' % (hex(node), m))
303 300 fp.close()
304 301
305 302 prevtags = ''
306 303 if local:
307 304 try:
308 305 fp = self.opener('localtags', 'r+')
309 306 except IOError:
310 307 fp = self.opener('localtags', 'a')
311 308 else:
312 309 prevtags = fp.read()
313 310
314 311 # local tags are stored in the current charset
315 312 writetags(fp, names, None, prevtags)
316 313 for name in names:
317 314 self.hook('tag', node=hex(node), tag=name, local=local)
318 315 return
319 316
320 317 try:
321 318 fp = self.wfile('.hgtags', 'rb+')
322 319 except IOError, e:
323 320 if e.errno != errno.ENOENT:
324 321 raise
325 322 fp = self.wfile('.hgtags', 'ab')
326 323 else:
327 324 prevtags = fp.read()
328 325
329 326 # committed tags are stored in UTF-8
330 327 writetags(fp, names, encoding.fromlocal, prevtags)
331 328
332 329 fp.close()
333 330
334 331 self.invalidatecaches()
335 332
336 333 if '.hgtags' not in self.dirstate:
337 334 self[None].add(['.hgtags'])
338 335
339 336 m = matchmod.exact(self.root, '', ['.hgtags'])
340 337 tagnode = self.commit(message, user, date, extra=extra, match=m)
341 338
342 339 for name in names:
343 340 self.hook('tag', node=hex(node), tag=name, local=local)
344 341
345 342 return tagnode
346 343
347 344 def tag(self, names, node, message, local, user, date):
348 345 '''tag a revision with one or more symbolic names.
349 346
350 347 names is a list of strings or, when adding a single tag, names may be a
351 348 string.
352 349
353 350 if local is True, the tags are stored in a per-repository file.
354 351 otherwise, they are stored in the .hgtags file, and a new
355 352 changeset is committed with the change.
356 353
357 354 keyword arguments:
358 355
359 356 local: whether to store tags in non-version-controlled file
360 357 (default False)
361 358
362 359 message: commit message to use if committing
363 360
364 361 user: name of user to use if committing
365 362
366 363 date: date tuple to use if committing'''
367 364
368 365 if not local:
369 366 for x in self.status()[:5]:
370 367 if '.hgtags' in x:
371 368 raise util.Abort(_('working copy of .hgtags is changed '
372 369 '(please commit .hgtags manually)'))
373 370
374 371 self.tags() # instantiate the cache
375 372 self._tag(names, node, message, local, user, date)
376 373
377 374 @propertycache
378 375 def _tagscache(self):
379 376 '''Returns a tagscache object that contains various tags related
380 377 caches.'''
381 378
382 379 # This simplifies its cache management by having one decorated
383 380 # function (this one) and the rest simply fetch things from it.
384 381 class tagscache(object):
385 382 def __init__(self):
386 383 # These two define the set of tags for this repository. tags
387 384 # maps tag name to node; tagtypes maps tag name to 'global' or
388 385 # 'local'. (Global tags are defined by .hgtags across all
389 386 # heads, and local tags are defined in .hg/localtags.)
390 387 # They constitute the in-memory cache of tags.
391 388 self.tags = self.tagtypes = None
392 389
393 390 self.nodetagscache = self.tagslist = None
394 391
395 392 cache = tagscache()
396 393 cache.tags, cache.tagtypes = self._findtags()
397 394
398 395 return cache
399 396
400 397 def tags(self):
401 398 '''return a mapping of tag to node'''
402 399 t = {}
403 400 for k, v in self._tagscache.tags.iteritems():
404 401 try:
405 402 # ignore tags to unknown nodes
406 403 self.changelog.rev(v)
407 404 t[k] = v
408 405 except (error.LookupError, ValueError):
409 406 pass
410 407 return t
411 408
412 409 def _findtags(self):
413 410 '''Do the hard work of finding tags. Return a pair of dicts
414 411 (tags, tagtypes) where tags maps tag name to node, and tagtypes
415 412 maps tag name to a string like \'global\' or \'local\'.
416 413 Subclasses or extensions are free to add their own tags, but
417 414 should be aware that the returned dicts will be retained for the
418 415 duration of the localrepo object.'''
419 416
420 417 # XXX what tagtype should subclasses/extensions use? Currently
421 418 # mq and bookmarks add tags, but do not set the tagtype at all.
422 419 # Should each extension invent its own tag type? Should there
423 420 # be one tagtype for all such "virtual" tags? Or is the status
424 421 # quo fine?
425 422
426 423 alltags = {} # map tag name to (node, hist)
427 424 tagtypes = {}
428 425
429 426 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
430 427 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
431 428
432 429 # Build the return dicts. Have to re-encode tag names because
433 430 # the tags module always uses UTF-8 (in order not to lose info
434 431 # writing to the cache), but the rest of Mercurial wants them in
435 432 # local encoding.
436 433 tags = {}
437 434 for (name, (node, hist)) in alltags.iteritems():
438 435 if node != nullid:
439 436 tags[encoding.tolocal(name)] = node
440 437 tags['tip'] = self.changelog.tip()
441 438 tagtypes = dict([(encoding.tolocal(name), value)
442 439 for (name, value) in tagtypes.iteritems()])
443 440 return (tags, tagtypes)
444 441
445 442 def tagtype(self, tagname):
446 443 '''
447 444 return the type of the given tag. result can be:
448 445
449 446 'local' : a local tag
450 447 'global' : a global tag
451 448 None : tag does not exist
452 449 '''
453 450
454 451 return self._tagscache.tagtypes.get(tagname)
455 452
456 453 def tagslist(self):
457 454 '''return a list of tags ordered by revision'''
458 455 if not self._tagscache.tagslist:
459 456 l = []
460 457 for t, n in self.tags().iteritems():
461 458 r = self.changelog.rev(n)
462 459 l.append((r, t, n))
463 460 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
464 461
465 462 return self._tagscache.tagslist
466 463
467 464 def nodetags(self, node):
468 465 '''return the tags associated with a node'''
469 466 if not self._tagscache.nodetagscache:
470 467 nodetagscache = {}
471 468 for t, n in self._tagscache.tags.iteritems():
472 469 nodetagscache.setdefault(n, []).append(t)
473 470 for tags in nodetagscache.itervalues():
474 471 tags.sort()
475 472 self._tagscache.nodetagscache = nodetagscache
476 473 return self._tagscache.nodetagscache.get(node, [])
477 474
478 475 def nodebookmarks(self, node):
479 476 marks = []
480 477 for bookmark, n in self._bookmarks.iteritems():
481 478 if n == node:
482 479 marks.append(bookmark)
483 480 return sorted(marks)
484 481
485 482 def _branchtags(self, partial, lrev):
486 483 # TODO: rename this function?
487 484 tiprev = len(self) - 1
488 485 if lrev != tiprev:
489 486 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
490 487 self._updatebranchcache(partial, ctxgen)
491 488 self._writebranchcache(partial, self.changelog.tip(), tiprev)
492 489
493 490 return partial
494 491
495 492 def updatebranchcache(self):
496 493 tip = self.changelog.tip()
497 494 if self._branchcache is not None and self._branchcachetip == tip:
498 495 return
499 496
500 497 oldtip = self._branchcachetip
501 498 self._branchcachetip = tip
502 499 if oldtip is None or oldtip not in self.changelog.nodemap:
503 500 partial, last, lrev = self._readbranchcache()
504 501 else:
505 502 lrev = self.changelog.rev(oldtip)
506 503 partial = self._branchcache
507 504
508 505 self._branchtags(partial, lrev)
509 506 # this private cache holds all heads (not just the branch tips)
510 507 self._branchcache = partial
511 508
512 509 def branchmap(self):
513 510 '''returns a dictionary {branch: [branchheads]}'''
514 511 self.updatebranchcache()
515 512 return self._branchcache
516 513
517 514 def _branchtip(self, heads):
518 515 '''return the tipmost branch head in heads'''
519 516 tip = heads[-1]
520 517 for h in reversed(heads):
521 518 if not self[h].closesbranch():
522 519 tip = h
523 520 break
524 521 return tip
525 522
526 523 def branchtip(self, branch):
527 524 '''return the tip node for a given branch'''
528 525 if branch not in self.branchmap():
529 526 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
530 527 return self._branchtip(self.branchmap()[branch])
531 528
532 529 def branchtags(self):
533 530 '''return a dict where branch names map to the tipmost head of
534 531 the branch, open heads come before closed'''
535 532 bt = {}
536 533 for bn, heads in self.branchmap().iteritems():
537 534 bt[bn] = self._branchtip(heads)
538 535 return bt
539 536
540 537 def _readbranchcache(self):
541 538 partial = {}
542 539 try:
543 540 f = self.opener("cache/branchheads")
544 541 lines = f.read().split('\n')
545 542 f.close()
546 543 except (IOError, OSError):
547 544 return {}, nullid, nullrev
548 545
549 546 try:
550 547 last, lrev = lines.pop(0).split(" ", 1)
551 548 last, lrev = bin(last), int(lrev)
552 549 if lrev >= len(self) or self[lrev].node() != last:
553 550 # invalidate the cache
554 551 raise ValueError('invalidating branch cache (tip differs)')
555 552 for l in lines:
556 553 if not l:
557 554 continue
558 555 node, label = l.split(" ", 1)
559 556 label = encoding.tolocal(label.strip())
560 557 if not node in self:
561 558 raise ValueError('invalidating branch cache because node '+
562 559 '%s does not exist' % node)
563 560 partial.setdefault(label, []).append(bin(node))
564 561 except KeyboardInterrupt:
565 562 raise
566 563 except Exception, inst:
567 564 if self.ui.debugflag:
568 565 self.ui.warn(str(inst), '\n')
569 566 partial, last, lrev = {}, nullid, nullrev
570 567 return partial, last, lrev
571 568
572 569 def _writebranchcache(self, branches, tip, tiprev):
573 570 try:
574 571 f = self.opener("cache/branchheads", "w", atomictemp=True)
575 572 f.write("%s %s\n" % (hex(tip), tiprev))
576 573 for label, nodes in branches.iteritems():
577 574 for node in nodes:
578 575 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
579 576 f.close()
580 577 except (IOError, OSError):
581 578 pass
582 579
583 580 def _updatebranchcache(self, partial, ctxgen):
584 581 """Given a branchhead cache, partial, that may have extra nodes or be
585 582 missing heads, and a generator of nodes that are at least a superset of
586 583 heads missing, this function updates partial to be correct.
587 584 """
588 585 # collect new branch entries
589 586 newbranches = {}
590 587 for c in ctxgen:
591 588 newbranches.setdefault(c.branch(), []).append(c.node())
592 589 # if older branchheads are reachable from new ones, they aren't
593 590 # really branchheads. Note checking parents is insufficient:
594 591 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
595 592 for branch, newnodes in newbranches.iteritems():
596 593 bheads = partial.setdefault(branch, [])
597 594 # Remove candidate heads that no longer are in the repo (e.g., as
598 595 # the result of a strip that just happened). Avoid using 'node in
599 596 # self' here because that dives down into branchcache code somewhat
600 597 # recursively.
601 598 bheadrevs = [self.changelog.rev(node) for node in bheads
602 599 if self.changelog.hasnode(node)]
603 600 newheadrevs = [self.changelog.rev(node) for node in newnodes
604 601 if self.changelog.hasnode(node)]
605 602 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
606 603 # Remove duplicates - nodes that are in newheadrevs and are already
607 604 # in bheadrevs. This can happen if you strip a node whose parent
608 605 # was already a head (because they're on different branches).
609 606 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
610 607
611 608 # Starting from tip means fewer passes over reachable. If we know
612 609 # the new candidates are not ancestors of existing heads, we don't
613 610 # have to examine ancestors of existing heads
614 611 if ctxisnew:
615 612 iterrevs = sorted(newheadrevs)
616 613 else:
617 614 iterrevs = list(bheadrevs)
618 615
619 616 # This loop prunes out two kinds of heads - heads that are
620 617 # superseded by a head in newheadrevs, and newheadrevs that are not
621 618 # heads because an existing head is their descendant.
622 619 while iterrevs:
623 620 latest = iterrevs.pop()
624 621 if latest not in bheadrevs:
625 622 continue
626 623 ancestors = set(self.changelog.ancestors([latest],
627 624 bheadrevs[0]))
628 625 if ancestors:
629 626 bheadrevs = [b for b in bheadrevs if b not in ancestors]
630 627 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
631 628
632 629 # There may be branches that cease to exist when the last commit in the
633 630 # branch was stripped. This code filters them out. Note that the
634 631 # branch that ceased to exist may not be in newbranches because
635 632 # newbranches is the set of candidate heads, which when you strip the
636 633 # last commit in a branch will be the parent branch.
637 634 for branch in partial:
638 635 nodes = [head for head in partial[branch]
639 636 if self.changelog.hasnode(head)]
640 637 if not nodes:
641 638 del partial[branch]
642 639
643 640 def lookup(self, key):
644 641 return self[key].node()
645 642
646 643 def lookupbranch(self, key, remote=None):
647 644 repo = remote or self
648 645 if key in repo.branchmap():
649 646 return key
650 647
651 648 repo = (remote and remote.local()) and remote or self
652 649 return repo[key].branch()
653 650
654 651 def known(self, nodes):
655 652 nm = self.changelog.nodemap
656 653 pc = self._phasecache
657 654 result = []
658 655 for n in nodes:
659 656 r = nm.get(n)
660 657 resp = not (r is None or pc.phase(self, r) >= phases.secret)
661 658 result.append(resp)
662 659 return result
663 660
664 661 def local(self):
665 662 return self
666 663
667 664 def join(self, f):
668 665 return os.path.join(self.path, f)
669 666
670 667 def wjoin(self, f):
671 668 return os.path.join(self.root, f)
672 669
673 670 def file(self, f):
674 671 if f[0] == '/':
675 672 f = f[1:]
676 673 return filelog.filelog(self.sopener, f)
677 674
678 675 def changectx(self, changeid):
679 676 return self[changeid]
680 677
681 678 def parents(self, changeid=None):
682 679 '''get list of changectxs for parents of changeid'''
683 680 return self[changeid].parents()
684 681
685 682 def setparents(self, p1, p2=nullid):
686 683 copies = self.dirstate.setparents(p1, p2)
687 684 if copies:
688 685 # Adjust copy records, the dirstate cannot do it, it
689 686 # requires access to parents manifests. Preserve them
690 687 # only for entries added to first parent.
691 688 pctx = self[p1]
692 689 for f in copies:
693 690 if f not in pctx and copies[f] in pctx:
694 691 self.dirstate.copy(copies[f], f)
695 692
696 693 def filectx(self, path, changeid=None, fileid=None):
697 694 """changeid can be a changeset revision, node, or tag.
698 695 fileid can be a file revision or node."""
699 696 return context.filectx(self, path, changeid, fileid)
700 697
701 698 def getcwd(self):
702 699 return self.dirstate.getcwd()
703 700
704 701 def pathto(self, f, cwd=None):
705 702 return self.dirstate.pathto(f, cwd)
706 703
707 704 def wfile(self, f, mode='r'):
708 705 return self.wopener(f, mode)
709 706
710 707 def _link(self, f):
711 708 return os.path.islink(self.wjoin(f))
712 709
713 710 def _loadfilter(self, filter):
714 711 if filter not in self.filterpats:
715 712 l = []
716 713 for pat, cmd in self.ui.configitems(filter):
717 714 if cmd == '!':
718 715 continue
719 716 mf = matchmod.match(self.root, '', [pat])
720 717 fn = None
721 718 params = cmd
722 719 for name, filterfn in self._datafilters.iteritems():
723 720 if cmd.startswith(name):
724 721 fn = filterfn
725 722 params = cmd[len(name):].lstrip()
726 723 break
727 724 if not fn:
728 725 fn = lambda s, c, **kwargs: util.filter(s, c)
729 726 # Wrap old filters not supporting keyword arguments
730 727 if not inspect.getargspec(fn)[2]:
731 728 oldfn = fn
732 729 fn = lambda s, c, **kwargs: oldfn(s, c)
733 730 l.append((mf, fn, params))
734 731 self.filterpats[filter] = l
735 732 return self.filterpats[filter]
736 733
737 734 def _filter(self, filterpats, filename, data):
738 735 for mf, fn, cmd in filterpats:
739 736 if mf(filename):
740 737 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
741 738 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
742 739 break
743 740
744 741 return data
745 742
746 743 @propertycache
747 744 def _encodefilterpats(self):
748 745 return self._loadfilter('encode')
749 746
750 747 @propertycache
751 748 def _decodefilterpats(self):
752 749 return self._loadfilter('decode')
753 750
754 751 def adddatafilter(self, name, filter):
755 752 self._datafilters[name] = filter
756 753
757 754 def wread(self, filename):
758 755 if self._link(filename):
759 756 data = os.readlink(self.wjoin(filename))
760 757 else:
761 758 data = self.wopener.read(filename)
762 759 return self._filter(self._encodefilterpats, filename, data)
763 760
764 761 def wwrite(self, filename, data, flags):
765 762 data = self._filter(self._decodefilterpats, filename, data)
766 763 if 'l' in flags:
767 764 self.wopener.symlink(data, filename)
768 765 else:
769 766 self.wopener.write(filename, data)
770 767 if 'x' in flags:
771 768 util.setflags(self.wjoin(filename), False, True)
772 769
773 770 def wwritedata(self, filename, data):
774 771 return self._filter(self._decodefilterpats, filename, data)
775 772
776 773 def transaction(self, desc):
777 774 tr = self._transref and self._transref() or None
778 775 if tr and tr.running():
779 776 return tr.nest()
780 777
781 778 # abort here if the journal already exists
782 779 if os.path.exists(self.sjoin("journal")):
783 780 raise error.RepoError(
784 781 _("abandoned transaction found - run hg recover"))
785 782
786 783 self._writejournal(desc)
787 784 renames = [(x, undoname(x)) for x in self._journalfiles()]
788 785
789 786 tr = transaction.transaction(self.ui.warn, self.sopener,
790 787 self.sjoin("journal"),
791 788 aftertrans(renames),
792 789 self.store.createmode)
793 790 self._transref = weakref.ref(tr)
794 791 return tr
795 792
796 793 def _journalfiles(self):
797 794 return (self.sjoin('journal'), self.join('journal.dirstate'),
798 795 self.join('journal.branch'), self.join('journal.desc'),
799 796 self.join('journal.bookmarks'),
800 797 self.sjoin('journal.phaseroots'))
801 798
802 799 def undofiles(self):
803 800 return [undoname(x) for x in self._journalfiles()]
804 801
805 802 def _writejournal(self, desc):
806 803 self.opener.write("journal.dirstate",
807 804 self.opener.tryread("dirstate"))
808 805 self.opener.write("journal.branch",
809 806 encoding.fromlocal(self.dirstate.branch()))
810 807 self.opener.write("journal.desc",
811 808 "%d\n%s\n" % (len(self), desc))
812 809 self.opener.write("journal.bookmarks",
813 810 self.opener.tryread("bookmarks"))
814 811 self.sopener.write("journal.phaseroots",
815 812 self.sopener.tryread("phaseroots"))
816 813
817 814 def recover(self):
818 815 lock = self.lock()
819 816 try:
820 817 if os.path.exists(self.sjoin("journal")):
821 818 self.ui.status(_("rolling back interrupted transaction\n"))
822 819 transaction.rollback(self.sopener, self.sjoin("journal"),
823 820 self.ui.warn)
824 821 self.invalidate()
825 822 return True
826 823 else:
827 824 self.ui.warn(_("no interrupted transaction available\n"))
828 825 return False
829 826 finally:
830 827 lock.release()
831 828
832 829 def rollback(self, dryrun=False, force=False):
833 830 wlock = lock = None
834 831 try:
835 832 wlock = self.wlock()
836 833 lock = self.lock()
837 834 if os.path.exists(self.sjoin("undo")):
838 835 return self._rollback(dryrun, force)
839 836 else:
840 837 self.ui.warn(_("no rollback information available\n"))
841 838 return 1
842 839 finally:
843 840 release(lock, wlock)
844 841
845 842 def _rollback(self, dryrun, force):
846 843 ui = self.ui
847 844 try:
848 845 args = self.opener.read('undo.desc').splitlines()
849 846 (oldlen, desc, detail) = (int(args[0]), args[1], None)
850 847 if len(args) >= 3:
851 848 detail = args[2]
852 849 oldtip = oldlen - 1
853 850
854 851 if detail and ui.verbose:
855 852 msg = (_('repository tip rolled back to revision %s'
856 853 ' (undo %s: %s)\n')
857 854 % (oldtip, desc, detail))
858 855 else:
859 856 msg = (_('repository tip rolled back to revision %s'
860 857 ' (undo %s)\n')
861 858 % (oldtip, desc))
862 859 except IOError:
863 860 msg = _('rolling back unknown transaction\n')
864 861 desc = None
865 862
866 863 if not force and self['.'] != self['tip'] and desc == 'commit':
867 864 raise util.Abort(
868 865 _('rollback of last commit while not checked out '
869 866 'may lose data'), hint=_('use -f to force'))
870 867
871 868 ui.status(msg)
872 869 if dryrun:
873 870 return 0
874 871
875 872 parents = self.dirstate.parents()
876 873 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
877 874 if os.path.exists(self.join('undo.bookmarks')):
878 875 util.rename(self.join('undo.bookmarks'),
879 876 self.join('bookmarks'))
880 877 if os.path.exists(self.sjoin('undo.phaseroots')):
881 878 util.rename(self.sjoin('undo.phaseroots'),
882 879 self.sjoin('phaseroots'))
883 880 self.invalidate()
884 881
885 882 parentgone = (parents[0] not in self.changelog.nodemap or
886 883 parents[1] not in self.changelog.nodemap)
887 884 if parentgone:
888 885 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
889 886 try:
890 887 branch = self.opener.read('undo.branch')
891 888 self.dirstate.setbranch(branch)
892 889 except IOError:
893 890 ui.warn(_('named branch could not be reset: '
894 891 'current branch is still \'%s\'\n')
895 892 % self.dirstate.branch())
896 893
897 894 self.dirstate.invalidate()
898 895 parents = tuple([p.rev() for p in self.parents()])
899 896 if len(parents) > 1:
900 897 ui.status(_('working directory now based on '
901 898 'revisions %d and %d\n') % parents)
902 899 else:
903 900 ui.status(_('working directory now based on '
904 901 'revision %d\n') % parents)
905 902 # TODO: if we know which new heads may result from this rollback, pass
906 903 # them to destroy(), which will prevent the branchhead cache from being
907 904 # invalidated.
908 905 self.destroyed()
909 906 return 0
910 907
911 908 def invalidatecaches(self):
912 909 def delcache(name):
913 910 try:
914 911 delattr(self, name)
915 912 except AttributeError:
916 913 pass
917 914
918 915 delcache('_tagscache')
919 916
920 917 self._branchcache = None # in UTF-8
921 918 self._branchcachetip = None
922 919
923 920 def invalidatedirstate(self):
924 921 '''Invalidates the dirstate, causing the next call to dirstate
925 922 to check if it was modified since the last time it was read,
926 923 rereading it if it has.
927 924
928 925 This is different from dirstate.invalidate() in that it doesn't always
929 926 reread the dirstate. Use dirstate.invalidate() if you want to
930 927 explicitly read the dirstate again (i.e. restoring it to a previous
931 928 known good state).'''
932 929 if 'dirstate' in self.__dict__:
933 930 for k in self.dirstate._filecache:
934 931 try:
935 932 delattr(self.dirstate, k)
936 933 except AttributeError:
937 934 pass
938 935 delattr(self, 'dirstate')
939 936
940 937 def invalidate(self):
941 938 for k in self._filecache:
942 939 # dirstate is invalidated separately in invalidatedirstate()
943 940 if k == 'dirstate':
944 941 continue
945 942
946 943 try:
947 944 delattr(self, k)
948 945 except AttributeError:
949 946 pass
950 947 self.invalidatecaches()
951 948
952 949 # Discard all cache entries to force reloading everything.
953 950 self._filecache.clear()
954 951
955 952 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
956 953 try:
957 954 l = lock.lock(lockname, 0, releasefn, desc=desc)
958 955 except error.LockHeld, inst:
959 956 if not wait:
960 957 raise
961 958 self.ui.warn(_("waiting for lock on %s held by %r\n") %
962 959 (desc, inst.locker))
963 960 # default to 600 seconds timeout
964 961 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
965 962 releasefn, desc=desc)
966 963 if acquirefn:
967 964 acquirefn()
968 965 return l
969 966
970 967 def _afterlock(self, callback):
971 968 """add a callback to the current repository lock.
972 969
973 970 The callback will be executed on lock release."""
974 971 l = self._lockref and self._lockref()
975 972 if l:
976 973 l.postrelease.append(callback)
977 974 else:
978 975 callback()
979 976
980 977 def lock(self, wait=True):
981 978 '''Lock the repository store (.hg/store) and return a weak reference
982 979 to the lock. Use this before modifying the store (e.g. committing or
983 980 stripping). If you are opening a transaction, get a lock as well.)'''
984 981 l = self._lockref and self._lockref()
985 982 if l is not None and l.held:
986 983 l.lock()
987 984 return l
988 985
989 986 def unlock():
990 987 self.store.write()
991 988 if '_phasecache' in vars(self):
992 989 self._phasecache.write()
993 if 'obsstore' in vars(self) and self.obsstore._new:
994 # XXX: transaction logic should be used here. But for
995 # now rewriting the whole file is good enough.
996 f = self.sopener('obsstore', 'wb', atomictemp=True)
997 try:
998 self.obsstore.flushmarkers(f)
999 f.close()
1000 except: # re-raises
1001 f.discard()
1002 raise
990 if 'obsstore' in vars(self):
991 self.obsstore.flushmarkers()
1003 992 for k, ce in self._filecache.items():
1004 993 if k == 'dirstate':
1005 994 continue
1006 995 ce.refresh()
1007 996
1008 997 l = self._lock(self.sjoin("lock"), wait, unlock,
1009 998 self.invalidate, _('repository %s') % self.origroot)
1010 999 self._lockref = weakref.ref(l)
1011 1000 return l
1012 1001
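On the write side, the unlock() hunk above replaces the inline atomictemp write with a single flushmarkers() call. Continuing the sketch from the read side, the method presumably encapsulates the same "skip if nothing new, rewrite atomically, discard on failure" behaviour that was deleted from localrepo; this is an assumption, and _writemarkers() below is a hypothetical stand-in for the real binary encoder in obsolete.py:

    def flushmarkers(self):
        # nothing new since the last flush: leave the file alone
        if not self._new:
            return
        f = self.sopener('obsstore', 'wb', atomictemp=True)
        try:
            f.write(self._writemarkers())  # hypothetical binary encoder
            f.close()
        except: # re-raises, as in the code removed from unlock()
            f.discard()
            raise
        self._new = []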
1013 1002 def wlock(self, wait=True):
1014 1003 '''Lock the non-store parts of the repository (everything under
1015 1004 .hg except .hg/store) and return a weak reference to the lock.
1016 1005 Use this before modifying files in .hg.'''
1017 1006 l = self._wlockref and self._wlockref()
1018 1007 if l is not None and l.held:
1019 1008 l.lock()
1020 1009 return l
1021 1010
1022 1011 def unlock():
1023 1012 self.dirstate.write()
1024 1013 ce = self._filecache.get('dirstate')
1025 1014 if ce:
1026 1015 ce.refresh()
1027 1016
1028 1017 l = self._lock(self.join("wlock"), wait, unlock,
1029 1018 self.invalidatedirstate, _('working directory of %s') %
1030 1019 self.origroot)
1031 1020 self._wlockref = weakref.ref(l)
1032 1021 return l
1033 1022
1034 1023 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1035 1024 """
1036 1025 commit an individual file as part of a larger transaction
1037 1026 """
1038 1027
1039 1028 fname = fctx.path()
1040 1029 text = fctx.data()
1041 1030 flog = self.file(fname)
1042 1031 fparent1 = manifest1.get(fname, nullid)
1043 1032 fparent2 = fparent2o = manifest2.get(fname, nullid)
1044 1033
1045 1034 meta = {}
1046 1035 copy = fctx.renamed()
1047 1036 if copy and copy[0] != fname:
1048 1037 # Mark the new revision of this file as a copy of another
1049 1038 # file. This copy data will effectively act as a parent
1050 1039 # of this new revision. If this is a merge, the first
1051 1040 # parent will be the nullid (meaning "look up the copy data")
1052 1041 # and the second one will be the other parent. For example:
1053 1042 #
1054 1043 # 0 --- 1 --- 3 rev1 changes file foo
1055 1044 # \ / rev2 renames foo to bar and changes it
1056 1045 # \- 2 -/ rev3 should have bar with all changes and
1057 1046 # should record that bar descends from
1058 1047 # bar in rev2 and foo in rev1
1059 1048 #
1060 1049 # this allows this merge to succeed:
1061 1050 #
1062 1051 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1063 1052 # \ / merging rev3 and rev4 should use bar@rev2
1064 1053 # \- 2 --- 4 as the merge base
1065 1054 #
1066 1055
1067 1056 cfname = copy[0]
1068 1057 crev = manifest1.get(cfname)
1069 1058 newfparent = fparent2
1070 1059
1071 1060 if manifest2: # branch merge
1072 1061 if fparent2 == nullid or crev is None: # copied on remote side
1073 1062 if cfname in manifest2:
1074 1063 crev = manifest2[cfname]
1075 1064 newfparent = fparent1
1076 1065
1077 1066 # find source in nearest ancestor if we've lost track
1078 1067 if not crev:
1079 1068 self.ui.debug(" %s: searching for copy revision for %s\n" %
1080 1069 (fname, cfname))
1081 1070 for ancestor in self[None].ancestors():
1082 1071 if cfname in ancestor:
1083 1072 crev = ancestor[cfname].filenode()
1084 1073 break
1085 1074
1086 1075 if crev:
1087 1076 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1088 1077 meta["copy"] = cfname
1089 1078 meta["copyrev"] = hex(crev)
1090 1079 fparent1, fparent2 = nullid, newfparent
1091 1080 else:
1092 1081 self.ui.warn(_("warning: can't find ancestor for '%s' "
1093 1082 "copied from '%s'!\n") % (fname, cfname))
1094 1083
1095 1084 elif fparent2 != nullid:
1096 1085 # is one parent an ancestor of the other?
1097 1086 fparentancestor = flog.ancestor(fparent1, fparent2)
1098 1087 if fparentancestor == fparent1:
1099 1088 fparent1, fparent2 = fparent2, nullid
1100 1089 elif fparentancestor == fparent2:
1101 1090 fparent2 = nullid
1102 1091
1103 1092 # is the file changed?
1104 1093 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1105 1094 changelist.append(fname)
1106 1095 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1107 1096
1108 1097 # are just the flags changed during merge?
1109 1098 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1110 1099 changelist.append(fname)
1111 1100
1112 1101 return fparent1
1113 1102
1114 1103 def commit(self, text="", user=None, date=None, match=None, force=False,
1115 1104 editor=False, extra={}):
1116 1105 """Add a new revision to current repository.
1117 1106
1118 1107 Revision information is gathered from the working directory,
1119 1108 match can be used to filter the committed files. If editor is
1120 1109 supplied, it is called to get a commit message.
1121 1110 """
1122 1111
1123 1112 def fail(f, msg):
1124 1113 raise util.Abort('%s: %s' % (f, msg))
1125 1114
1126 1115 if not match:
1127 1116 match = matchmod.always(self.root, '')
1128 1117
1129 1118 if not force:
1130 1119 vdirs = []
1131 1120 match.dir = vdirs.append
1132 1121 match.bad = fail
1133 1122
1134 1123 wlock = self.wlock()
1135 1124 try:
1136 1125 wctx = self[None]
1137 1126 merge = len(wctx.parents()) > 1
1138 1127
1139 1128 if (not force and merge and match and
1140 1129 (match.files() or match.anypats())):
1141 1130 raise util.Abort(_('cannot partially commit a merge '
1142 1131 '(do not specify files or patterns)'))
1143 1132
1144 1133 changes = self.status(match=match, clean=force)
1145 1134 if force:
1146 1135 changes[0].extend(changes[6]) # mq may commit unchanged files
1147 1136
1148 1137 # check subrepos
1149 1138 subs = []
1150 1139 commitsubs = set()
1151 1140 newstate = wctx.substate.copy()
1152 1141 # only manage subrepos and .hgsubstate if .hgsub is present
1153 1142 if '.hgsub' in wctx:
1154 1143 # we'll decide whether to track this ourselves, thanks
1155 1144 if '.hgsubstate' in changes[0]:
1156 1145 changes[0].remove('.hgsubstate')
1157 1146 if '.hgsubstate' in changes[2]:
1158 1147 changes[2].remove('.hgsubstate')
1159 1148
1160 1149 # compare current state to last committed state
1161 1150 # build new substate based on last committed state
1162 1151 oldstate = wctx.p1().substate
1163 1152 for s in sorted(newstate.keys()):
1164 1153 if not match(s):
1165 1154 # ignore working copy, use old state if present
1166 1155 if s in oldstate:
1167 1156 newstate[s] = oldstate[s]
1168 1157 continue
1169 1158 if not force:
1170 1159 raise util.Abort(
1171 1160 _("commit with new subrepo %s excluded") % s)
1172 1161 if wctx.sub(s).dirty(True):
1173 1162 if not self.ui.configbool('ui', 'commitsubrepos'):
1174 1163 raise util.Abort(
1175 1164 _("uncommitted changes in subrepo %s") % s,
1176 1165 hint=_("use --subrepos for recursive commit"))
1177 1166 subs.append(s)
1178 1167 commitsubs.add(s)
1179 1168 else:
1180 1169 bs = wctx.sub(s).basestate()
1181 1170 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1182 1171 if oldstate.get(s, (None, None, None))[1] != bs:
1183 1172 subs.append(s)
1184 1173
1185 1174 # check for removed subrepos
1186 1175 for p in wctx.parents():
1187 1176 r = [s for s in p.substate if s not in newstate]
1188 1177 subs += [s for s in r if match(s)]
1189 1178 if subs:
1190 1179 if (not match('.hgsub') and
1191 1180 '.hgsub' in (wctx.modified() + wctx.added())):
1192 1181 raise util.Abort(
1193 1182 _("can't commit subrepos without .hgsub"))
1194 1183 changes[0].insert(0, '.hgsubstate')
1195 1184
1196 1185 elif '.hgsub' in changes[2]:
1197 1186 # clean up .hgsubstate when .hgsub is removed
1198 1187 if ('.hgsubstate' in wctx and
1199 1188 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1200 1189 changes[2].insert(0, '.hgsubstate')
1201 1190
1202 1191 # make sure all explicit patterns are matched
1203 1192 if not force and match.files():
1204 1193 matched = set(changes[0] + changes[1] + changes[2])
1205 1194
1206 1195 for f in match.files():
1207 1196 if f == '.' or f in matched or f in wctx.substate:
1208 1197 continue
1209 1198 if f in changes[3]: # missing
1210 1199 fail(f, _('file not found!'))
1211 1200 if f in vdirs: # visited directory
1212 1201 d = f + '/'
1213 1202 for mf in matched:
1214 1203 if mf.startswith(d):
1215 1204 break
1216 1205 else:
1217 1206 fail(f, _("no match under directory!"))
1218 1207 elif f not in self.dirstate:
1219 1208 fail(f, _("file not tracked!"))
1220 1209
1221 1210 if (not force and not extra.get("close") and not merge
1222 1211 and not (changes[0] or changes[1] or changes[2])
1223 1212 and wctx.branch() == wctx.p1().branch()):
1224 1213 return None
1225 1214
1226 1215 if merge and changes[3]:
1227 1216 raise util.Abort(_("cannot commit merge with missing files"))
1228 1217
1229 1218 ms = mergemod.mergestate(self)
1230 1219 for f in changes[0]:
1231 1220 if f in ms and ms[f] == 'u':
1232 1221 raise util.Abort(_("unresolved merge conflicts "
1233 1222 "(see hg help resolve)"))
1234 1223
1235 1224 cctx = context.workingctx(self, text, user, date, extra, changes)
1236 1225 if editor:
1237 1226 cctx._text = editor(self, cctx, subs)
1238 1227 edited = (text != cctx._text)
1239 1228
1240 1229 # commit subs and write new state
1241 1230 if subs:
1242 1231 for s in sorted(commitsubs):
1243 1232 sub = wctx.sub(s)
1244 1233 self.ui.status(_('committing subrepository %s\n') %
1245 1234 subrepo.subrelpath(sub))
1246 1235 sr = sub.commit(cctx._text, user, date)
1247 1236 newstate[s] = (newstate[s][0], sr)
1248 1237 subrepo.writestate(self, newstate)
1249 1238
1250 1239 # Save commit message in case this transaction gets rolled back
1251 1240 # (e.g. by a pretxncommit hook). Leave the content alone on
1252 1241 # the assumption that the user will use the same editor again.
1253 1242 msgfn = self.savecommitmessage(cctx._text)
1254 1243
1255 1244 p1, p2 = self.dirstate.parents()
1256 1245 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1257 1246 try:
1258 1247 self.hook("precommit", throw=True, parent1=hookp1,
1259 1248 parent2=hookp2)
1260 1249 ret = self.commitctx(cctx, True)
1261 1250 except: # re-raises
1262 1251 if edited:
1263 1252 self.ui.write(
1264 1253 _('note: commit message saved in %s\n') % msgfn)
1265 1254 raise
1266 1255
1267 1256 # update bookmarks, dirstate and mergestate
1268 1257 bookmarks.update(self, [p1, p2], ret)
1269 1258 for f in changes[0] + changes[1]:
1270 1259 self.dirstate.normal(f)
1271 1260 for f in changes[2]:
1272 1261 self.dirstate.drop(f)
1273 1262 self.dirstate.setparents(ret)
1274 1263 ms.reset()
1275 1264 finally:
1276 1265 wlock.release()
1277 1266
1278 1267 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1279 1268 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1280 1269 self._afterlock(commithook)
1281 1270 return ret
1282 1271
1283 1272 def commitctx(self, ctx, error=False):
1284 1273 """Add a new revision to current repository.
1285 1274 Revision information is passed via the context argument.
1286 1275 """
1287 1276
1288 1277 tr = lock = None
1289 1278 removed = list(ctx.removed())
1290 1279 p1, p2 = ctx.p1(), ctx.p2()
1291 1280 user = ctx.user()
1292 1281
1293 1282 lock = self.lock()
1294 1283 try:
1295 1284 tr = self.transaction("commit")
1296 1285 trp = weakref.proxy(tr)
1297 1286
1298 1287 if ctx.files():
1299 1288 m1 = p1.manifest().copy()
1300 1289 m2 = p2.manifest()
1301 1290
1302 1291 # check in files
1303 1292 new = {}
1304 1293 changed = []
1305 1294 linkrev = len(self)
1306 1295 for f in sorted(ctx.modified() + ctx.added()):
1307 1296 self.ui.note(f + "\n")
1308 1297 try:
1309 1298 fctx = ctx[f]
1310 1299 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1311 1300 changed)
1312 1301 m1.set(f, fctx.flags())
1313 1302 except OSError, inst:
1314 1303 self.ui.warn(_("trouble committing %s!\n") % f)
1315 1304 raise
1316 1305 except IOError, inst:
1317 1306 errcode = getattr(inst, 'errno', errno.ENOENT)
1318 1307 if error or errcode and errcode != errno.ENOENT:
1319 1308 self.ui.warn(_("trouble committing %s!\n") % f)
1320 1309 raise
1321 1310 else:
1322 1311 removed.append(f)
1323 1312
1324 1313 # update manifest
1325 1314 m1.update(new)
1326 1315 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1327 1316 drop = [f for f in removed if f in m1]
1328 1317 for f in drop:
1329 1318 del m1[f]
1330 1319 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1331 1320 p2.manifestnode(), (new, drop))
1332 1321 files = changed + removed
1333 1322 else:
1334 1323 mn = p1.manifestnode()
1335 1324 files = []
1336 1325
1337 1326 # update changelog
1338 1327 self.changelog.delayupdate()
1339 1328 n = self.changelog.add(mn, files, ctx.description(),
1340 1329 trp, p1.node(), p2.node(),
1341 1330 user, ctx.date(), ctx.extra().copy())
1342 1331 p = lambda: self.changelog.writepending() and self.root or ""
1343 1332 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1344 1333 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1345 1334 parent2=xp2, pending=p)
1346 1335 self.changelog.finalize(trp)
1347 1336 # set the new commit in its proper phase
1348 1337 targetphase = phases.newcommitphase(self.ui)
1349 1338 if targetphase:
1350 1339 # retracting the boundary does not alter parent changesets.
1351 1340 # if a parent has a higher phase the resulting phase will
1352 1341 # be compliant anyway
1353 1342 #
1354 1343 # if minimal phase was 0 we don't need to retract anything
1355 1344 phases.retractboundary(self, targetphase, [n])
1356 1345 tr.close()
1357 1346 self.updatebranchcache()
1358 1347 return n
1359 1348 finally:
1360 1349 if tr:
1361 1350 tr.release()
1362 1351 lock.release()
1363 1352
1364 1353 def destroyed(self, newheadnodes=None):
1365 1354 '''Inform the repository that nodes have been destroyed.
1366 1355 Intended for use by strip and rollback, so there's a common
1367 1356 place for anything that has to be done after destroying history.
1368 1357
1369 1358 If you know the branchheadcache was uptodate before nodes were removed
1370 1359 and you also know the set of candidate new heads that may have resulted
1371 1360 from the destruction, you can set newheadnodes. This will enable the
1372 1361 code to update the branchheads cache, rather than having future code
1373 1362 decide it's invalid and regenerate it from scratch.
1374 1363 '''
1375 1364 # If we have info, newheadnodes, on how to update the branch cache, do
1376 1365 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1377 1366 # will be caught the next time it is read.
1378 1367 if newheadnodes:
1379 1368 tiprev = len(self) - 1
1380 1369 ctxgen = (self[node] for node in newheadnodes
1381 1370 if self.changelog.hasnode(node))
1382 1371 self._updatebranchcache(self._branchcache, ctxgen)
1383 1372 self._writebranchcache(self._branchcache, self.changelog.tip(),
1384 1373 tiprev)
1385 1374
1386 1375 # Ensure the persistent tag cache is updated. Doing it now
1387 1376 # means that the tag cache only has to worry about destroyed
1388 1377 # heads immediately after a strip/rollback. That in turn
1389 1378 # guarantees that "cachetip == currenttip" (comparing both rev
1390 1379 # and node) always means no nodes have been added or destroyed.
1391 1380
1392 1381 # XXX this is suboptimal when qrefresh'ing: we strip the current
1393 1382 # head, refresh the tag cache, then immediately add a new head.
1394 1383 # But I think doing it this way is necessary for the "instant
1395 1384 # tag cache retrieval" case to work.
1396 1385 self.invalidatecaches()
1397 1386
1398 1387 def walk(self, match, node=None):
1399 1388 '''
1400 1389 walk recursively through the directory tree or a given
1401 1390 changeset, finding all files matched by the match
1402 1391 function
1403 1392 '''
1404 1393 return self[node].walk(match)
1405 1394
1406 1395 def status(self, node1='.', node2=None, match=None,
1407 1396 ignored=False, clean=False, unknown=False,
1408 1397 listsubrepos=False):
1409 1398 """return status of files between two nodes or node and working
1410 1399 directory.
1411 1400
1412 1401 If node1 is None, use the first dirstate parent instead.
1413 1402 If node2 is None, compare node1 with working directory.
1414 1403 """
1415 1404
1416 1405 def mfmatches(ctx):
1417 1406 mf = ctx.manifest().copy()
1418 1407 if match.always():
1419 1408 return mf
1420 1409 for fn in mf.keys():
1421 1410 if not match(fn):
1422 1411 del mf[fn]
1423 1412 return mf
1424 1413
1425 1414 if isinstance(node1, context.changectx):
1426 1415 ctx1 = node1
1427 1416 else:
1428 1417 ctx1 = self[node1]
1429 1418 if isinstance(node2, context.changectx):
1430 1419 ctx2 = node2
1431 1420 else:
1432 1421 ctx2 = self[node2]
1433 1422
1434 1423 working = ctx2.rev() is None
1435 1424 parentworking = working and ctx1 == self['.']
1436 1425 match = match or matchmod.always(self.root, self.getcwd())
1437 1426 listignored, listclean, listunknown = ignored, clean, unknown
1438 1427
1439 1428 # load earliest manifest first for caching reasons
1440 1429 if not working and ctx2.rev() < ctx1.rev():
1441 1430 ctx2.manifest()
1442 1431
1443 1432 if not parentworking:
1444 1433 def bad(f, msg):
1445 1434 # 'f' may be a directory pattern from 'match.files()',
1446 1435 # so 'f not in ctx1' is not enough
1447 1436 if f not in ctx1 and f not in ctx1.dirs():
1448 1437 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1449 1438 match.bad = bad
1450 1439
1451 1440 if working: # we need to scan the working dir
1452 1441 subrepos = []
1453 1442 if '.hgsub' in self.dirstate:
1454 1443 subrepos = ctx2.substate.keys()
1455 1444 s = self.dirstate.status(match, subrepos, listignored,
1456 1445 listclean, listunknown)
1457 1446 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1458 1447
1459 1448 # check for any possibly clean files
1460 1449 if parentworking and cmp:
1461 1450 fixup = []
1462 1451 # do a full compare of any files that might have changed
1463 1452 for f in sorted(cmp):
1464 1453 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1465 1454 or ctx1[f].cmp(ctx2[f])):
1466 1455 modified.append(f)
1467 1456 else:
1468 1457 fixup.append(f)
1469 1458
1470 1459 # update dirstate for files that are actually clean
1471 1460 if fixup:
1472 1461 if listclean:
1473 1462 clean += fixup
1474 1463
1475 1464 try:
1476 1465 # updating the dirstate is optional
1477 1466 # so we don't wait on the lock
1478 1467 wlock = self.wlock(False)
1479 1468 try:
1480 1469 for f in fixup:
1481 1470 self.dirstate.normal(f)
1482 1471 finally:
1483 1472 wlock.release()
1484 1473 except error.LockError:
1485 1474 pass
1486 1475
1487 1476 if not parentworking:
1488 1477 mf1 = mfmatches(ctx1)
1489 1478 if working:
1490 1479 # we are comparing working dir against non-parent
1491 1480 # generate a pseudo-manifest for the working dir
1492 1481 mf2 = mfmatches(self['.'])
1493 1482 for f in cmp + modified + added:
1494 1483 mf2[f] = None
1495 1484 mf2.set(f, ctx2.flags(f))
1496 1485 for f in removed:
1497 1486 if f in mf2:
1498 1487 del mf2[f]
1499 1488 else:
1500 1489 # we are comparing two revisions
1501 1490 deleted, unknown, ignored = [], [], []
1502 1491 mf2 = mfmatches(ctx2)
1503 1492
1504 1493 modified, added, clean = [], [], []
1505 1494 withflags = mf1.withflags() | mf2.withflags()
1506 1495 for fn in mf2:
1507 1496 if fn in mf1:
1508 1497 if (fn not in deleted and
1509 1498 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1510 1499 (mf1[fn] != mf2[fn] and
1511 1500 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1512 1501 modified.append(fn)
1513 1502 elif listclean:
1514 1503 clean.append(fn)
1515 1504 del mf1[fn]
1516 1505 elif fn not in deleted:
1517 1506 added.append(fn)
1518 1507 removed = mf1.keys()
1519 1508
1520 1509 if working and modified and not self.dirstate._checklink:
1521 1510 # Symlink placeholders may get non-symlink-like contents
1522 1511 # via user error or dereferencing by NFS or Samba servers,
1523 1512 # so we filter out any placeholders that don't look like a
1524 1513 # symlink
1525 1514 sane = []
1526 1515 for f in modified:
1527 1516 if ctx2.flags(f) == 'l':
1528 1517 d = ctx2[f].data()
1529 1518 if len(d) >= 1024 or '\n' in d or util.binary(d):
1530 1519 self.ui.debug('ignoring suspect symlink placeholder'
1531 1520 ' "%s"\n' % f)
1532 1521 continue
1533 1522 sane.append(f)
1534 1523 modified = sane
1535 1524
1536 1525 r = modified, added, removed, deleted, unknown, ignored, clean
1537 1526
1538 1527 if listsubrepos:
1539 1528 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1540 1529 if working:
1541 1530 rev2 = None
1542 1531 else:
1543 1532 rev2 = ctx2.substate[subpath][1]
1544 1533 try:
1545 1534 submatch = matchmod.narrowmatcher(subpath, match)
1546 1535 s = sub.status(rev2, match=submatch, ignored=listignored,
1547 1536 clean=listclean, unknown=listunknown,
1548 1537 listsubrepos=True)
1549 1538 for rfiles, sfiles in zip(r, s):
1550 1539 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1551 1540 except error.LookupError:
1552 1541 self.ui.status(_("skipping missing subrepository: %s\n")
1553 1542 % subpath)
1554 1543
1555 1544 for l in r:
1556 1545 l.sort()
1557 1546 return r
1558 1547
1559 1548 def heads(self, start=None):
1560 1549 heads = self.changelog.heads(start)
1561 1550 # sort the output in rev descending order
1562 1551 return sorted(heads, key=self.changelog.rev, reverse=True)
1563 1552
1564 1553 def branchheads(self, branch=None, start=None, closed=False):
1565 1554 '''return a (possibly filtered) list of heads for the given branch
1566 1555
1567 1556 Heads are returned in topological order, from newest to oldest.
1568 1557 If branch is None, use the dirstate branch.
1569 1558 If start is not None, return only heads reachable from start.
1570 1559 If closed is True, return heads that are marked as closed as well.
1571 1560 '''
1572 1561 if branch is None:
1573 1562 branch = self[None].branch()
1574 1563 branches = self.branchmap()
1575 1564 if branch not in branches:
1576 1565 return []
1577 1566 # the cache returns heads ordered lowest to highest
1578 1567 bheads = list(reversed(branches[branch]))
1579 1568 if start is not None:
1580 1569 # filter out the heads that cannot be reached from startrev
1581 1570 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1582 1571 bheads = [h for h in bheads if h in fbheads]
1583 1572 if not closed:
1584 1573 bheads = [h for h in bheads if not self[h].closesbranch()]
1585 1574 return bheads
1586 1575
1587 1576 def branches(self, nodes):
1588 1577 if not nodes:
1589 1578 nodes = [self.changelog.tip()]
1590 1579 b = []
1591 1580 for n in nodes:
1592 1581 t = n
1593 1582 while True:
1594 1583 p = self.changelog.parents(n)
1595 1584 if p[1] != nullid or p[0] == nullid:
1596 1585 b.append((t, n, p[0], p[1]))
1597 1586 break
1598 1587 n = p[0]
1599 1588 return b
1600 1589
1601 1590 def between(self, pairs):
1602 1591 r = []
1603 1592
1604 1593 for top, bottom in pairs:
1605 1594 n, l, i = top, [], 0
1606 1595 f = 1
1607 1596
1608 1597 while n != bottom and n != nullid:
1609 1598 p = self.changelog.parents(n)[0]
1610 1599 if i == f:
1611 1600 l.append(n)
1612 1601 f = f * 2
1613 1602 n = p
1614 1603 i += 1
1615 1604
1616 1605 r.append(l)
1617 1606
1618 1607 return r
1619 1608
1620 1609 def pull(self, remote, heads=None, force=False):
1621 1610 lock = self.lock()
1622 1611 try:
1623 1612 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1624 1613 force=force)
1625 1614 common, fetch, rheads = tmp
1626 1615 if not fetch:
1627 1616 self.ui.status(_("no changes found\n"))
1628 1617 added = []
1629 1618 result = 0
1630 1619 else:
1631 1620 if heads is None and list(common) == [nullid]:
1632 1621 self.ui.status(_("requesting all changes\n"))
1633 1622 elif heads is None and remote.capable('changegroupsubset'):
1634 1623 # issue1320, avoid a race if remote changed after discovery
1635 1624 heads = rheads
1636 1625
1637 1626 if remote.capable('getbundle'):
1638 1627 cg = remote.getbundle('pull', common=common,
1639 1628 heads=heads or rheads)
1640 1629 elif heads is None:
1641 1630 cg = remote.changegroup(fetch, 'pull')
1642 1631 elif not remote.capable('changegroupsubset'):
1643 1632 raise util.Abort(_("partial pull cannot be done because "
1644 1633 "other repository doesn't support "
1645 1634 "changegroupsubset."))
1646 1635 else:
1647 1636 cg = remote.changegroupsubset(fetch, heads, 'pull')
1648 1637 clstart = len(self.changelog)
1649 1638 result = self.addchangegroup(cg, 'pull', remote.url())
1650 1639 clend = len(self.changelog)
1651 1640 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1652 1641
1653 1642 # compute target subset
1654 1643 if heads is None:
1655 1644 # We pulled everything possible
1656 1645 # sync on everything common
1657 1646 subset = common + added
1658 1647 else:
1659 1648 # We pulled a specific subset
1660 1649 # sync on this subset
1661 1650 subset = heads
1662 1651
1663 1652 # Get remote phases data from remote
1664 1653 remotephases = remote.listkeys('phases')
1665 1654 publishing = bool(remotephases.get('publishing', False))
1666 1655 if remotephases and not publishing:
1667 1656 # remote is new and non-publishing
1668 1657 pheads, _dr = phases.analyzeremotephases(self, subset,
1669 1658 remotephases)
1670 1659 phases.advanceboundary(self, phases.public, pheads)
1671 1660 phases.advanceboundary(self, phases.draft, subset)
1672 1661 else:
1673 1662 # Remote is old or publishing; all common changesets
1674 1663 # should be seen as public
1675 1664 phases.advanceboundary(self, phases.public, subset)
1676 1665
1677 1666 remoteobs = remote.listkeys('obsolete')
1678 1667 if 'dump' in remoteobs:
1679 1668 data = base85.b85decode(remoteobs['dump'])
1680 1669 self.obsstore.mergemarkers(data)
1681 1670 finally:
1682 1671 lock.release()
1683 1672
1684 1673 return result
1685 1674
1686 1675 def checkpush(self, force, revs):
1687 1676 """Extensions can override this function if additional checks have
1688 1677 to be performed before pushing, or call it if they override the push
1689 1678 command.
1690 1679 """
1691 1680 pass
1692 1681
1693 1682 def push(self, remote, force=False, revs=None, newbranch=False):
1694 1683 '''Push outgoing changesets (limited by revs) from the current
1695 1684 repository to remote. Return an integer:
1696 1685 - None means nothing to push
1697 1686 - 0 means HTTP error
1698 1687 - 1 means we pushed and remote head count is unchanged *or*
1699 1688 we have outgoing changesets but refused to push
1700 1689 - other values as described by addchangegroup()
1701 1690 '''
1702 1691 # there are two ways to push to remote repo:
1703 1692 #
1704 1693 # addchangegroup assumes local user can lock remote
1705 1694 # repo (local filesystem, old ssh servers).
1706 1695 #
1707 1696 # unbundle assumes local user cannot lock remote repo (new ssh
1708 1697 # servers, http servers).
1709 1698
1710 1699 # get local lock as we might write phase data
1711 1700 locallock = self.lock()
1712 1701 try:
1713 1702 self.checkpush(force, revs)
1714 1703 lock = None
1715 1704 unbundle = remote.capable('unbundle')
1716 1705 if not unbundle:
1717 1706 lock = remote.lock()
1718 1707 try:
1719 1708 # discovery
1720 1709 fci = discovery.findcommonincoming
1721 1710 commoninc = fci(self, remote, force=force)
1722 1711 common, inc, remoteheads = commoninc
1723 1712 fco = discovery.findcommonoutgoing
1724 1713 outgoing = fco(self, remote, onlyheads=revs,
1725 1714 commoninc=commoninc, force=force)
1726 1715
1727 1716
1728 1717 if not outgoing.missing:
1729 1718 # nothing to push
1730 1719 scmutil.nochangesfound(self.ui, outgoing.excluded)
1731 1720 ret = None
1732 1721 else:
1733 1722 # something to push
1734 1723 if not force:
1735 1724 discovery.checkheads(self, remote, outgoing,
1736 1725 remoteheads, newbranch,
1737 1726 bool(inc))
1738 1727
1739 1728 # create a changegroup from local
1740 1729 if revs is None and not outgoing.excluded:
1741 1730 # push everything,
1742 1731 # use the fast path, no race possible on push
1743 1732 cg = self._changegroup(outgoing.missing, 'push')
1744 1733 else:
1745 1734 cg = self.getlocalbundle('push', outgoing)
1746 1735
1747 1736 # apply changegroup to remote
1748 1737 if unbundle:
1749 1738 # local repo finds heads on server, finds out what
1750 1739 # revs it must push. once revs transferred, if server
1751 1740 # finds it has different heads (someone else won
1752 1741 # commit/push race), server aborts.
1753 1742 if force:
1754 1743 remoteheads = ['force']
1755 1744 # ssh: return remote's addchangegroup()
1756 1745 # http: return remote's addchangegroup() or 0 for error
1757 1746 ret = remote.unbundle(cg, remoteheads, 'push')
1758 1747 else:
1759 1748 # we return an integer indicating remote head count
1760 1749 # change
1761 1750 ret = remote.addchangegroup(cg, 'push', self.url())
1762 1751
1763 1752 if ret:
1764 1753 # push succeeded, synchronize the target of the push
1765 1754 cheads = outgoing.missingheads
1766 1755 elif revs is None:
1767 1756 # All-out push failed; synchronize all common
1768 1757 cheads = outgoing.commonheads
1769 1758 else:
1770 1759 # I want cheads = heads(::missingheads and ::commonheads)
1771 1760 # (missingheads is revs with secret changeset filtered out)
1772 1761 #
1773 1762 # This can be expressed as:
1774 1763 # cheads = ( (missingheads and ::commonheads)
1775 1764 #          + (commonheads and ::missingheads)
1776 1765 #          )
1777 1766 #
1778 1767 # while trying to push we already computed the following:
1779 1768 # common = (::commonheads)
1780 1769 # missing = ((commonheads::missingheads) - commonheads)
1781 1770 #
1782 1771 # We can pick:
1783 1772 # * missingheads part of common (::commonheads)
1784 1773 common = set(outgoing.common)
1785 1774 cheads = [node for node in revs if node in common]
1786 1775 # and
1787 1776 # * commonheads parents on missing
1788 1777 revset = self.set('%ln and parents(roots(%ln))',
1789 1778 outgoing.commonheads,
1790 1779 outgoing.missing)
1791 1780 cheads.extend(c.node() for c in revset)
1792 1781 # even when we don't push, exchanging phase data is useful
1793 1782 remotephases = remote.listkeys('phases')
1794 1783 if not remotephases: # old server or public only repo
1795 1784 phases.advanceboundary(self, phases.public, cheads)
1796 1785 # don't push any phase data as there is nothing to push
1797 1786 else:
1798 1787 ana = phases.analyzeremotephases(self, cheads, remotephases)
1799 1788 pheads, droots = ana
1800 1789 ### Apply remote phase on local
1801 1790 if remotephases.get('publishing', False):
1802 1791 phases.advanceboundary(self, phases.public, cheads)
1803 1792 else: # publish = False
1804 1793 phases.advanceboundary(self, phases.public, pheads)
1805 1794 phases.advanceboundary(self, phases.draft, cheads)
1806 1795 ### Apply local phase on remote
1807 1796
1808 1797 # Get the list of all revs that are draft on remote but public here.
1809 1798 # XXX Beware that the revset breaks if droots is not strictly
1810 1799 # XXX roots; we may want to ensure it is, but that is costly
1811 1800 outdated = self.set('heads((%ln::%ln) and public())',
1812 1801 droots, cheads)
1813 1802 for newremotehead in outdated:
1814 1803 r = remote.pushkey('phases',
1815 1804 newremotehead.hex(),
1816 1805 str(phases.draft),
1817 1806 str(phases.public))
1818 1807 if not r:
1819 1808 self.ui.warn(_('updating %s to public failed!\n')
1820 1809 % newremotehead)
1821 1810 if 'obsolete' in self.listkeys('namespaces') and self.obsstore:
1822 1811 data = self.obsstore._writemarkers()
1823 1812 r = remote.pushkey('obsolete', 'dump', '',
1824 1813 base85.b85encode(data))
1825 1814 if not r:
1826 1815 self.ui.warn(_('failed to push obsolete markers!\n'))
1827 1816 finally:
1828 1817 if lock is not None:
1829 1818 lock.release()
1830 1819 finally:
1831 1820 locallock.release()
1832 1821
1833 1822 self.ui.debug("checking for updated bookmarks\n")
1834 1823 rb = remote.listkeys('bookmarks')
1835 1824 for k in rb.keys():
1836 1825 if k in self._bookmarks:
1837 1826 nr, nl = rb[k], hex(self._bookmarks[k])
1838 1827 if nr in self:
1839 1828 cr = self[nr]
1840 1829 cl = self[nl]
1841 1830 if cl in cr.descendants():
1842 1831 r = remote.pushkey('bookmarks', k, nr, nl)
1843 1832 if r:
1844 1833 self.ui.status(_("updating bookmark %s\n") % k)
1845 1834 else:
1846 1835 self.ui.warn(_('updating bookmark %s'
1847 1836 ' failed!\n') % k)
1848 1837
1849 1838 return ret
1850 1839
1851 1840 def changegroupinfo(self, nodes, source):
1852 1841 if self.ui.verbose or source == 'bundle':
1853 1842 self.ui.status(_("%d changesets found\n") % len(nodes))
1854 1843 if self.ui.debugflag:
1855 1844 self.ui.debug("list of changesets:\n")
1856 1845 for node in nodes:
1857 1846 self.ui.debug("%s\n" % hex(node))
1858 1847
1859 1848 def changegroupsubset(self, bases, heads, source):
1860 1849 """Compute a changegroup consisting of all the nodes that are
1861 1850 descendants of any of the bases and ancestors of any of the heads.
1862 1851 Return a chunkbuffer object whose read() method will return
1863 1852 successive changegroup chunks.
1864 1853
1865 1854 It is fairly complex as determining which filenodes and which
1866 1855 manifest nodes need to be included for the changeset to be complete
1867 1856 is non-trivial.
1868 1857
1869 1858 Another wrinkle is doing the reverse, figuring out which changeset in
1870 1859 the changegroup a particular filenode or manifestnode belongs to.
1871 1860 """
1872 1861 cl = self.changelog
1873 1862 if not bases:
1874 1863 bases = [nullid]
1875 1864 csets, bases, heads = cl.nodesbetween(bases, heads)
1876 1865 # We assume that all ancestors of bases are known
1877 1866 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1878 1867 return self._changegroupsubset(common, csets, heads, source)
1879 1868
1880 1869 def getlocalbundle(self, source, outgoing):
1881 1870 """Like getbundle, but taking a discovery.outgoing as an argument.
1882 1871
1883 1872 This is only implemented for local repos and reuses potentially
1884 1873 precomputed sets in outgoing."""
1885 1874 if not outgoing.missing:
1886 1875 return None
1887 1876 return self._changegroupsubset(outgoing.common,
1888 1877 outgoing.missing,
1889 1878 outgoing.missingheads,
1890 1879 source)
1891 1880
1892 1881 def getbundle(self, source, heads=None, common=None):
1893 1882 """Like changegroupsubset, but returns the set difference between the
1894 1883 ancestors of heads and the ancestors common.
1895 1884
1896 1885 If heads is None, use the local heads. If common is None, use [nullid].
1897 1886
1898 1887 The nodes in common might not all be known locally due to the way the
1899 1888 current discovery protocol works.
1900 1889 """
1901 1890 cl = self.changelog
1902 1891 if common:
1903 1892 nm = cl.nodemap
1904 1893 common = [n for n in common if n in nm]
1905 1894 else:
1906 1895 common = [nullid]
1907 1896 if not heads:
1908 1897 heads = cl.heads()
1909 1898 return self.getlocalbundle(source,
1910 1899 discovery.outgoing(cl, common, heads))
1911 1900
1912 1901 def _changegroupsubset(self, commonrevs, csets, heads, source):
1913 1902
1914 1903 cl = self.changelog
1915 1904 mf = self.manifest
1916 1905 mfs = {} # needed manifests
1917 1906 fnodes = {} # needed file nodes
1918 1907 changedfiles = set()
1919 1908 fstate = ['', {}]
1920 1909 count = [0, 0]
1921 1910
1922 1911 # can we go through the fast path ?
1923 1912 heads.sort()
1924 1913 if heads == sorted(self.heads()):
1925 1914 return self._changegroup(csets, source)
1926 1915
1927 1916 # slow path
1928 1917 self.hook('preoutgoing', throw=True, source=source)
1929 1918 self.changegroupinfo(csets, source)
1930 1919
1931 1920 # filter any nodes that claim to be part of the known set
1932 1921 def prune(revlog, missing):
1933 1922 rr, rl = revlog.rev, revlog.linkrev
1934 1923 return [n for n in missing
1935 1924 if rl(rr(n)) not in commonrevs]
1936 1925
1937 1926 progress = self.ui.progress
1938 1927 _bundling = _('bundling')
1939 1928 _changesets = _('changesets')
1940 1929 _manifests = _('manifests')
1941 1930 _files = _('files')
1942 1931
1943 1932 def lookup(revlog, x):
1944 1933 if revlog == cl:
1945 1934 c = cl.read(x)
1946 1935 changedfiles.update(c[3])
1947 1936 mfs.setdefault(c[0], x)
1948 1937 count[0] += 1
1949 1938 progress(_bundling, count[0],
1950 1939 unit=_changesets, total=count[1])
1951 1940 return x
1952 1941 elif revlog == mf:
1953 1942 clnode = mfs[x]
1954 1943 mdata = mf.readfast(x)
1955 1944 for f, n in mdata.iteritems():
1956 1945 if f in changedfiles:
1957 1946 fnodes[f].setdefault(n, clnode)
1958 1947 count[0] += 1
1959 1948 progress(_bundling, count[0],
1960 1949 unit=_manifests, total=count[1])
1961 1950 return clnode
1962 1951 else:
1963 1952 progress(_bundling, count[0], item=fstate[0],
1964 1953 unit=_files, total=count[1])
1965 1954 return fstate[1][x]
1966 1955
1967 1956 bundler = changegroup.bundle10(lookup)
1968 1957 reorder = self.ui.config('bundle', 'reorder', 'auto')
1969 1958 if reorder == 'auto':
1970 1959 reorder = None
1971 1960 else:
1972 1961 reorder = util.parsebool(reorder)
1973 1962
1974 1963 def gengroup():
1975 1964 # Create a changenode group generator that will call our functions
1976 1965 # back to look up the owning changenode and collect information.
1977 1966 count[:] = [0, len(csets)]
1978 1967 for chunk in cl.group(csets, bundler, reorder=reorder):
1979 1968 yield chunk
1980 1969 progress(_bundling, None)
1981 1970
1982 1971 # Create a generator for the manifestnodes that calls our lookup
1983 1972 # and data collection functions back.
1984 1973 for f in changedfiles:
1985 1974 fnodes[f] = {}
1986 1975 count[:] = [0, len(mfs)]
1987 1976 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1988 1977 yield chunk
1989 1978 progress(_bundling, None)
1990 1979
1991 1980 mfs.clear()
1992 1981
1993 1982 # Go through all our files in order sorted by name.
1994 1983 count[:] = [0, len(changedfiles)]
1995 1984 for fname in sorted(changedfiles):
1996 1985 filerevlog = self.file(fname)
1997 1986 if not len(filerevlog):
1998 1987 raise util.Abort(_("empty or missing revlog for %s")
1999 1988 % fname)
2000 1989 fstate[0] = fname
2001 1990 fstate[1] = fnodes.pop(fname, {})
2002 1991
2003 1992 nodelist = prune(filerevlog, fstate[1])
2004 1993 if nodelist:
2005 1994 count[0] += 1
2006 1995 yield bundler.fileheader(fname)
2007 1996 for chunk in filerevlog.group(nodelist, bundler, reorder):
2008 1997 yield chunk
2009 1998
2010 1999 # Signal that no more groups are left.
2011 2000 yield bundler.close()
2012 2001 progress(_bundling, None)
2013 2002
2014 2003 if csets:
2015 2004 self.hook('outgoing', node=hex(csets[0]), source=source)
2016 2005
2017 2006 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2018 2007
2019 2008 def changegroup(self, basenodes, source):
2020 2009 # to avoid a race we use changegroupsubset() (issue1320)
2021 2010 return self.changegroupsubset(basenodes, self.heads(), source)
2022 2011
2023 2012 def _changegroup(self, nodes, source):
2024 2013 """Compute the changegroup of all nodes that we have that a recipient
2025 2014 doesn't. Return a chunkbuffer object whose read() method will return
2026 2015 successive changegroup chunks.
2027 2016
2028 2017 This is much easier than the previous function as we can assume that
2029 2018 the recipient has any changenode we aren't sending them.
2030 2019
2031 2020 nodes is the set of nodes to send"""
2032 2021
2033 2022 cl = self.changelog
2034 2023 mf = self.manifest
2035 2024 mfs = {}
2036 2025 changedfiles = set()
2037 2026 fstate = ['']
2038 2027 count = [0, 0]
2039 2028
2040 2029 self.hook('preoutgoing', throw=True, source=source)
2041 2030 self.changegroupinfo(nodes, source)
2042 2031
2043 2032 revset = set([cl.rev(n) for n in nodes])
2044 2033
2045 2034 def gennodelst(log):
2046 2035 ln, llr = log.node, log.linkrev
2047 2036 return [ln(r) for r in log if llr(r) in revset]
2048 2037
2049 2038 progress = self.ui.progress
2050 2039 _bundling = _('bundling')
2051 2040 _changesets = _('changesets')
2052 2041 _manifests = _('manifests')
2053 2042 _files = _('files')
2054 2043
2055 2044 def lookup(revlog, x):
2056 2045 if revlog == cl:
2057 2046 c = cl.read(x)
2058 2047 changedfiles.update(c[3])
2059 2048 mfs.setdefault(c[0], x)
2060 2049 count[0] += 1
2061 2050 progress(_bundling, count[0],
2062 2051 unit=_changesets, total=count[1])
2063 2052 return x
2064 2053 elif revlog == mf:
2065 2054 count[0] += 1
2066 2055 progress(_bundling, count[0],
2067 2056 unit=_manifests, total=count[1])
2068 2057 return cl.node(revlog.linkrev(revlog.rev(x)))
2069 2058 else:
2070 2059 progress(_bundling, count[0], item=fstate[0],
2071 2060 total=count[1], unit=_files)
2072 2061 return cl.node(revlog.linkrev(revlog.rev(x)))
2073 2062
2074 2063 bundler = changegroup.bundle10(lookup)
2075 2064 reorder = self.ui.config('bundle', 'reorder', 'auto')
2076 2065 if reorder == 'auto':
2077 2066 reorder = None
2078 2067 else:
2079 2068 reorder = util.parsebool(reorder)
2080 2069
2081 2070 def gengroup():
2082 2071 '''yield a sequence of changegroup chunks (strings)'''
2083 2072 # construct a list of all changed files
2084 2073
2085 2074 count[:] = [0, len(nodes)]
2086 2075 for chunk in cl.group(nodes, bundler, reorder=reorder):
2087 2076 yield chunk
2088 2077 progress(_bundling, None)
2089 2078
2090 2079 count[:] = [0, len(mfs)]
2091 2080 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2092 2081 yield chunk
2093 2082 progress(_bundling, None)
2094 2083
2095 2084 count[:] = [0, len(changedfiles)]
2096 2085 for fname in sorted(changedfiles):
2097 2086 filerevlog = self.file(fname)
2098 2087 if not len(filerevlog):
2099 2088 raise util.Abort(_("empty or missing revlog for %s")
2100 2089 % fname)
2101 2090 fstate[0] = fname
2102 2091 nodelist = gennodelst(filerevlog)
2103 2092 if nodelist:
2104 2093 count[0] += 1
2105 2094 yield bundler.fileheader(fname)
2106 2095 for chunk in filerevlog.group(nodelist, bundler, reorder):
2107 2096 yield chunk
2108 2097 yield bundler.close()
2109 2098 progress(_bundling, None)
2110 2099
2111 2100 if nodes:
2112 2101 self.hook('outgoing', node=hex(nodes[0]), source=source)
2113 2102
2114 2103 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2115 2104
2116 2105 def addchangegroup(self, source, srctype, url, emptyok=False):
2117 2106 """Add the changegroup returned by source.read() to this repo.
2118 2107 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2119 2108 the URL of the repo where this changegroup is coming from.
2120 2109
2121 2110 Return an integer summarizing the change to this repo:
2122 2111 - nothing changed or no source: 0
2123 2112 - more heads than before: 1+added heads (2..n)
2124 2113 - fewer heads than before: -1-removed heads (-2..-n)
2125 2114 - number of heads stays the same: 1
2126 2115 """
2127 2116 def csmap(x):
2128 2117 self.ui.debug("add changeset %s\n" % short(x))
2129 2118 return len(cl)
2130 2119
2131 2120 def revmap(x):
2132 2121 return cl.rev(x)
2133 2122
2134 2123 if not source:
2135 2124 return 0
2136 2125
2137 2126 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2138 2127
2139 2128 changesets = files = revisions = 0
2140 2129 efiles = set()
2141 2130
2142 2131 # write changelog data to temp files so concurrent readers will not see
2143 2132 # an inconsistent view
2144 2133 cl = self.changelog
2145 2134 cl.delayupdate()
2146 2135 oldheads = cl.heads()
2147 2136
2148 2137 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2149 2138 try:
2150 2139 trp = weakref.proxy(tr)
2151 2140 # pull off the changeset group
2152 2141 self.ui.status(_("adding changesets\n"))
2153 2142 clstart = len(cl)
2154 2143 class prog(object):
2155 2144 step = _('changesets')
2156 2145 count = 1
2157 2146 ui = self.ui
2158 2147 total = None
2159 2148 def __call__(self):
2160 2149 self.ui.progress(self.step, self.count, unit=_('chunks'),
2161 2150 total=self.total)
2162 2151 self.count += 1
2163 2152 pr = prog()
2164 2153 source.callback = pr
2165 2154
2166 2155 source.changelogheader()
2167 2156 srccontent = cl.addgroup(source, csmap, trp)
2168 2157 if not (srccontent or emptyok):
2169 2158 raise util.Abort(_("received changelog group is empty"))
2170 2159 clend = len(cl)
2171 2160 changesets = clend - clstart
2172 2161 for c in xrange(clstart, clend):
2173 2162 efiles.update(self[c].files())
2174 2163 efiles = len(efiles)
2175 2164 self.ui.progress(_('changesets'), None)
2176 2165
2177 2166 # pull off the manifest group
2178 2167 self.ui.status(_("adding manifests\n"))
2179 2168 pr.step = _('manifests')
2180 2169 pr.count = 1
2181 2170 pr.total = changesets # manifests <= changesets
2182 2171 # no need to check for empty manifest group here:
2183 2172 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2184 2173 # no new manifest will be created and the manifest group will
2185 2174 # be empty during the pull
2186 2175 source.manifestheader()
2187 2176 self.manifest.addgroup(source, revmap, trp)
2188 2177 self.ui.progress(_('manifests'), None)
2189 2178
2190 2179 needfiles = {}
2191 2180 if self.ui.configbool('server', 'validate', default=False):
2192 2181 # validate incoming csets have their manifests
2193 2182 for cset in xrange(clstart, clend):
2194 2183 mfest = self.changelog.read(self.changelog.node(cset))[0]
2195 2184 mfest = self.manifest.readdelta(mfest)
2196 2185 # store file nodes we must see
2197 2186 for f, n in mfest.iteritems():
2198 2187 needfiles.setdefault(f, set()).add(n)
2199 2188
2200 2189 # process the files
2201 2190 self.ui.status(_("adding file changes\n"))
2202 2191 pr.step = _('files')
2203 2192 pr.count = 1
2204 2193 pr.total = efiles
2205 2194 source.callback = None
2206 2195
2207 2196 while True:
2208 2197 chunkdata = source.filelogheader()
2209 2198 if not chunkdata:
2210 2199 break
2211 2200 f = chunkdata["filename"]
2212 2201 self.ui.debug("adding %s revisions\n" % f)
2213 2202 pr()
2214 2203 fl = self.file(f)
2215 2204 o = len(fl)
2216 2205 if not fl.addgroup(source, revmap, trp):
2217 2206 raise util.Abort(_("received file revlog group is empty"))
2218 2207 revisions += len(fl) - o
2219 2208 files += 1
2220 2209 if f in needfiles:
2221 2210 needs = needfiles[f]
2222 2211 for new in xrange(o, len(fl)):
2223 2212 n = fl.node(new)
2224 2213 if n in needs:
2225 2214 needs.remove(n)
2226 2215 if not needs:
2227 2216 del needfiles[f]
2228 2217 self.ui.progress(_('files'), None)
2229 2218
2230 2219 for f, needs in needfiles.iteritems():
2231 2220 fl = self.file(f)
2232 2221 for n in needs:
2233 2222 try:
2234 2223 fl.rev(n)
2235 2224 except error.LookupError:
2236 2225 raise util.Abort(
2237 2226 _('missing file data for %s:%s - run hg verify') %
2238 2227 (f, hex(n)))
2239 2228
2240 2229 dh = 0
2241 2230 if oldheads:
2242 2231 heads = cl.heads()
2243 2232 dh = len(heads) - len(oldheads)
2244 2233 for h in heads:
2245 2234 if h not in oldheads and self[h].closesbranch():
2246 2235 dh -= 1
2247 2236 htext = ""
2248 2237 if dh:
2249 2238 htext = _(" (%+d heads)") % dh
2250 2239
2251 2240 self.ui.status(_("added %d changesets"
2252 2241 " with %d changes to %d files%s\n")
2253 2242 % (changesets, revisions, files, htext))
2254 2243
2255 2244 if changesets > 0:
2256 2245 p = lambda: cl.writepending() and self.root or ""
2257 2246 self.hook('pretxnchangegroup', throw=True,
2258 2247 node=hex(cl.node(clstart)), source=srctype,
2259 2248 url=url, pending=p)
2260 2249
2261 2250 added = [cl.node(r) for r in xrange(clstart, clend)]
2262 2251 publishing = self.ui.configbool('phases', 'publish', True)
2263 2252 if srctype == 'push':
2264 2253 # Old servers cannot push the boundary themselves.
2265 2254 # New servers won't push the boundary if the changeset already
2266 2255 # existed locally as secret
2267 2256 #
2268 2257 # We should not use 'added' here but the list of all changes in
2269 2258 # the bundle
2270 2259 if publishing:
2271 2260 phases.advanceboundary(self, phases.public, srccontent)
2272 2261 else:
2273 2262 phases.advanceboundary(self, phases.draft, srccontent)
2274 2263 phases.retractboundary(self, phases.draft, added)
2275 2264 elif srctype != 'strip':
2276 2265 # publishing only alters behavior during push
2277 2266 #
2278 2267 # strip should not touch boundary at all
2279 2268 phases.retractboundary(self, phases.draft, added)
2280 2269
2281 2270 # make changelog see real files again
2282 2271 cl.finalize(trp)
2283 2272
2284 2273 tr.close()
2285 2274
2286 2275 if changesets > 0:
2287 2276 def runhooks():
2288 2277 # forcefully update the on-disk branch cache
2289 2278 self.ui.debug("updating the branch cache\n")
2290 2279 self.updatebranchcache()
2291 2280 self.hook("changegroup", node=hex(cl.node(clstart)),
2292 2281 source=srctype, url=url)
2293 2282
2294 2283 for n in added:
2295 2284 self.hook("incoming", node=hex(n), source=srctype,
2296 2285 url=url)
2297 2286 self._afterlock(runhooks)
2298 2287
2299 2288 finally:
2300 2289 tr.release()
2301 2290 # never return 0 here:
2302 2291 if dh < 0:
2303 2292 return dh - 1
2304 2293 else:
2305 2294 return dh + 1
2306 2295
2307 2296 def stream_in(self, remote, requirements):
2308 2297 lock = self.lock()
2309 2298 try:
2310 2299 fp = remote.stream_out()
2311 2300 l = fp.readline()
2312 2301 try:
2313 2302 resp = int(l)
2314 2303 except ValueError:
2315 2304 raise error.ResponseError(
2316 2305 _('unexpected response from remote server:'), l)
2317 2306 if resp == 1:
2318 2307 raise util.Abort(_('operation forbidden by server'))
2319 2308 elif resp == 2:
2320 2309 raise util.Abort(_('locking the remote repository failed'))
2321 2310 elif resp != 0:
2322 2311 raise util.Abort(_('the server sent an unknown error code'))
2323 2312 self.ui.status(_('streaming all changes\n'))
2324 2313 l = fp.readline()
2325 2314 try:
2326 2315 total_files, total_bytes = map(int, l.split(' ', 1))
2327 2316 except (ValueError, TypeError):
2328 2317 raise error.ResponseError(
2329 2318 _('unexpected response from remote server:'), l)
2330 2319 self.ui.status(_('%d files to transfer, %s of data\n') %
2331 2320 (total_files, util.bytecount(total_bytes)))
2332 2321 handled_bytes = 0
2333 2322 self.ui.progress(_('clone'), 0, total=total_bytes)
2334 2323 start = time.time()
2335 2324 for i in xrange(total_files):
2336 2325 # XXX doesn't support '\n' or '\r' in filenames
2337 2326 l = fp.readline()
2338 2327 try:
2339 2328 name, size = l.split('\0', 1)
2340 2329 size = int(size)
2341 2330 except (ValueError, TypeError):
2342 2331 raise error.ResponseError(
2343 2332 _('unexpected response from remote server:'), l)
2344 2333 if self.ui.debugflag:
2345 2334 self.ui.debug('adding %s (%s)\n' %
2346 2335 (name, util.bytecount(size)))
2347 2336 # for backwards compat, name was partially encoded
2348 2337 ofp = self.sopener(store.decodedir(name), 'w')
2349 2338 for chunk in util.filechunkiter(fp, limit=size):
2350 2339 handled_bytes += len(chunk)
2351 2340 self.ui.progress(_('clone'), handled_bytes,
2352 2341 total=total_bytes)
2353 2342 ofp.write(chunk)
2354 2343 ofp.close()
2355 2344 elapsed = time.time() - start
2356 2345 if elapsed <= 0:
2357 2346 elapsed = 0.001
2358 2347 self.ui.progress(_('clone'), None)
2359 2348 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2360 2349 (util.bytecount(total_bytes), elapsed,
2361 2350 util.bytecount(total_bytes / elapsed)))
2362 2351
2363 2352 # new requirements = old non-format requirements +
2364 2353 #                    new format-related requirements
2365 2354 #                    from the streamed-in repository
2366 2355 requirements.update(set(self.requirements) - self.supportedformats)
2367 2356 self._applyrequirements(requirements)
2368 2357 self._writerequirements()
2369 2358
2370 2359 self.invalidate()
2371 2360 return len(self.heads()) + 1
2372 2361 finally:
2373 2362 lock.release()
2374 2363
2375 2364 def clone(self, remote, heads=[], stream=False):
2376 2365 '''clone remote repository.
2377 2366
2378 2367 keyword arguments:
2379 2368 heads: list of revs to clone (forces use of pull)
2380 2369 stream: use streaming clone if possible'''
2381 2370
2382 2371 # now, all clients that can request uncompressed clones can
2383 2372 # read repo formats supported by all servers that can serve
2384 2373 # them.
2385 2374
2386 2375 # if revlog format changes, client will have to check version
2387 2376 # and format flags on "stream" capability, and use
2388 2377 # uncompressed only if compatible.
2389 2378
2390 2379 if not stream:
2391 2380 # if the server explicitly prefers to stream (for fast LANs)
2392 2381 stream = remote.capable('stream-preferred')
2393 2382
2394 2383 if stream and not heads:
2395 2384 # 'stream' means remote revlog format is revlogv1 only
2396 2385 if remote.capable('stream'):
2397 2386 return self.stream_in(remote, set(('revlogv1',)))
2398 2387 # otherwise, 'streamreqs' contains the remote revlog format
2399 2388 streamreqs = remote.capable('streamreqs')
2400 2389 if streamreqs:
2401 2390 streamreqs = set(streamreqs.split(','))
2402 2391 # if we support it, stream in and adjust our requirements
2403 2392 if not streamreqs - self.supportedformats:
2404 2393 return self.stream_in(remote, streamreqs)
2405 2394 return self.pull(remote, heads)
2406 2395
2407 2396 def pushkey(self, namespace, key, old, new):
2408 2397 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2409 2398 old=old, new=new)
2410 2399 ret = pushkey.push(self, namespace, key, old, new)
2411 2400 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2412 2401 ret=ret)
2413 2402 return ret
2414 2403
2415 2404 def listkeys(self, namespace):
2416 2405 self.hook('prelistkeys', throw=True, namespace=namespace)
2417 2406 values = pushkey.list(self, namespace)
2418 2407 self.hook('listkeys', namespace=namespace, values=values)
2419 2408 return values
2420 2409
2421 2410 def debugwireargs(self, one, two, three=None, four=None, five=None):
2422 2411 '''used to test argument passing over the wire'''
2423 2412 return "%s %s %s %s %s" % (one, two, three, four, five)
2424 2413
2425 2414 def savecommitmessage(self, text):
2426 2415 fp = self.opener('last-message.txt', 'wb')
2427 2416 try:
2428 2417 fp.write(text)
2429 2418 finally:
2430 2419 fp.close()
2431 2420 return self.pathto(fp.name[len(self.root)+1:])
2432 2421
2433 2422 # used to avoid circular references so destructors work
2434 2423 def aftertrans(files):
2435 2424 renamefiles = [tuple(t) for t in files]
2436 2425 def a():
2437 2426 for src, dest in renamefiles:
2438 2427 try:
2439 2428 util.rename(src, dest)
2440 2429 except OSError: # journal file does not yet exist
2441 2430 pass
2442 2431 return a
2443 2432
2444 2433 def undoname(fn):
2445 2434 base, name = os.path.split(fn)
2446 2435 assert name.startswith('journal')
2447 2436 return os.path.join(base, name.replace('journal', 'undo', 1))
2448 2437
2449 2438 def instance(ui, path, create):
2450 2439 return localrepository(ui, util.urllocalpath(path), create)
2451 2440
2452 2441 def islocal(path):
2453 2442 return True
@@ -1,279 +1,288
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 build new tools to reconcile conflicting rewriting actions. To
19 19 facilitate conflict resolution, markers include various annotations
20 20 besides the old and new changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23
24 24 Format
25 25 ------
26 26
27 27 Markers are stored in an append-only file in
28 28 '.hg/store/obsstore'.
29 29
30 30 The file starts with a version header:
31 31
32 32 - 1 unsigned byte: version number, starting at zero.
33 33
34 34
35 35 The header is followed by the markers. Each marker is made of:
36 36
37 37 - 1 unsigned byte: number of new changesets "R", which can be zero.
38 38
39 39 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
40 40
41 41 - 1 byte: a bit field. It is reserved for flags used in obsolete
42 42 markers common operations, to avoid repeated decoding of metadata
43 43 entries.
44 44
45 45 - 20 bytes: obsoleted changeset identifier.
46 46
47 47 - R*20 bytes: new changeset identifiers.
48 48
49 49 - M bytes: metadata as a sequence of nul-terminated strings. Each
50 50 string contains a key and a value, separated by a colon ':', without
51 51 additional encoding. Keys cannot contain '\0' or ':' and values
52 52 cannot contain '\0'.
53 53 """
54 54 import struct
55 55 from mercurial import util, base85
56 56 from i18n import _
57 57
58 58 _pack = struct.pack
59 59 _unpack = struct.unpack
60 60
61 61
62 62
63 63 # data used for parsing and writing
64 64 _fmversion = 0
65 65 _fmfixed = '>BIB20s'
66 66 _fmnode = '20s'
67 67 _fmfsize = struct.calcsize(_fmfixed)
68 68 _fnodesize = struct.calcsize(_fmnode)
69 69
70 70 def _readmarkers(data):
71 71 """Read and enumerate markers from raw data"""
72 72 off = 0
73 73 diskversion = _unpack('>B', data[off:off + 1])[0]
74 74 off += 1
75 75 if diskversion != _fmversion:
76 76 raise util.Abort(_('parsing obsolete marker: unknown version %r')
77 77 % diskversion)
78 78
79 79 # Loop on markers
80 80 l = len(data)
81 81 while off + _fmfsize <= l:
82 82 # read fixed part
83 83 cur = data[off:off + _fmfsize]
84 84 off += _fmfsize
85 85 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
86 86 # read replacement
87 87 sucs = ()
88 88 if nbsuc:
89 89 s = (_fnodesize * nbsuc)
90 90 cur = data[off:off + s]
91 91 sucs = _unpack(_fmnode * nbsuc, cur)
92 92 off += s
93 93 # read metadata
94 94 # (metadata will be decoded on demand)
95 95 metadata = data[off:off + mdsize]
96 96 if len(metadata) != mdsize:
97 97 raise util.Abort(_('parsing obsolete marker: metadata is too '
98 98 'short, %d bytes expected, got %d')
99 99 % (mdsize, len(metadata)))
100 100 off += mdsize
101 101 yield (pre, sucs, flags, metadata)
102 102
103 103 def encodemeta(meta):
104 104 """Return encoded metadata string to string mapping.
105 105
106 106 Assume no ':' in key and no '\0' in both key and value."""
107 107 for key, value in meta.iteritems():
108 108 if ':' in key or '\0' in key:
109 109 raise ValueError("':' and '\0' are forbidden in metadata keys")
110 110 if '\0' in value:
111 111 raise ValueError("'\0' is forbidden in metadata values")
112 112 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
113 113
114 114 def decodemeta(data):
115 115 """Return string to string dictionary from encoded version."""
116 116 d = {}
117 117 for l in data.split('\0'):
118 118 if l:
119 119 key, value = l.split(':')
120 120 d[key] = value
121 121 return d
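As an illustrative round trip (assuming only the two helpers above), encoding and then decoding a metadata mapping returns the original dictionary:

meta = {'date': '1339100000 0', 'user': 'alice'}
blob = encodemeta(meta)          # '\0'-separated "key:value" pairs, sorted by key
assert decodemeta(blob) == meta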
122 122
123 123 class marker(object):
124 124 """Wrap obsolete marker raw data"""
125 125
126 126 def __init__(self, repo, data):
127 127 # the repo argument will be used to create changectx in a later version
128 128 self._repo = repo
129 129 self._data = data
130 130 self._decodedmeta = None
131 131
132 132 def precnode(self):
133 133 """Precursor changeset node identifier"""
134 134 return self._data[0]
135 135
136 136 def succnodes(self):
137 137 """List of successor changesets node identifiers"""
138 138 return self._data[1]
139 139
140 140 def metadata(self):
141 141 """Decoded metadata dictionary"""
142 142 if self._decodedmeta is None:
143 143 self._decodedmeta = decodemeta(self._data[3])
144 144 return self._decodedmeta
145 145
146 146 def date(self):
147 147 """Creation date as (unixtime, offset)"""
148 148 parts = self.metadata()['date'].split(' ')
149 149 return (float(parts[0]), int(parts[1]))
150 150
151 151 class obsstore(object):
152 152 """Store obsolete markers
153 153
154 154 Markers can be accessed with two mappings:
155 155 - precursors: old -> set(new)
156 156 - successors: new -> set(old)
157 157 """
158 158
159 def __init__(self):
159 def __init__(self, sopener):
160 160 self._all = []
161 161 # new markers to serialize
162 162 self._new = []
163 163 self.precursors = {}
164 164 self.successors = {}
165 self.sopener = sopener
166 data = sopener.tryread('obsstore')
167 if data:
168 for marker in _readmarkers(data):
169 self._load(marker)
165 170
166 171 def __iter__(self):
167 172 return iter(self._all)
168 173
169 174 def __nonzero__(self):
170 175 return bool(self._all)
171 176
172 177 def create(self, prec, succs=(), flag=0, metadata=None):
173 178 """obsolete: add a new obsolete marker
174 179
175 180 * ensure it is hashable
176 181 * check mandatory metadata
177 182 * encode metadata
178 183 """
179 184 if metadata is None:
180 185 metadata = {}
181 186 if len(prec) != 20:
182 187 raise ValueError(prec)
183 188 for succ in succs:
184 189 if len(succ) != 20:
185 190 raise ValueError(succ)
186 191 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
187 192 self.add(marker)
188 193
189 194 def add(self, marker):
190 195 """Add a new marker to the store
191 196
192 197 This marker still needs to be written to disk"""
193 198 self._new.append(marker)
194 199 self._load(marker)
195 200
196 def loadmarkers(self, data):
197 """Load all markers in data, mark them as known."""
198 for marker in _readmarkers(data):
199 self._load(marker)
200
201 201 def mergemarkers(self, data):
202 202 other = set(_readmarkers(data))
203 203 local = set(self._all)
204 204 new = other - local
205 205 for marker in new:
206 206 self.add(marker)
207 207
208 def flushmarkers(self, stream):
209 """Write all markers to a stream
208 def flushmarkers(self):
209 """Write all markers on disk
210 210
211 211 After this operation, "new" markers are considered "known"."""
212 self._writemarkers(stream)
213 self._new[:] = []
212 if self._new:
213 # XXX: transaction logic should be used here. But for
214 # now rewriting the whole file is good enough.
215 f = self.sopener('obsstore', 'wb', atomictemp=True)
216 try:
217 self._writemarkers(f)
218 f.close()
219 self._new[:] = []
220 except: # re-raises
221 f.discard()
222 raise
214 223
215 224 def _load(self, marker):
216 225 self._all.append(marker)
217 226 pre, sucs = marker[:2]
218 227 self.precursors.setdefault(pre, set()).add(marker)
219 228 for suc in sucs:
220 229 self.successors.setdefault(suc, set()).add(marker)
221 230
222 231 def _writemarkers(self, stream=None):
223 232 # Kept separate from flushmarkers(), it will be reused for
224 233 # marker exchange.
225 234 if stream is None:
226 235 final = []
227 236 w = final.append
228 237 else:
229 238 w = stream.write
230 239 w(_pack('>B', _fmversion))
231 240 for marker in self._all:
232 241 pre, sucs, flags, metadata = marker
233 242 nbsuc = len(sucs)
234 243 format = _fmfixed + (_fmnode * nbsuc)
235 244 data = [nbsuc, len(metadata), flags, pre]
236 245 data.extend(sucs)
237 246 w(_pack(format, *data))
238 247 w(metadata)
239 248 if stream is None:
240 249 return ''.join(final)
241 250
242 251 def listmarkers(repo):
243 252 """List markers over pushkey"""
244 253 if not repo.obsstore:
245 254 return {}
246 255 data = repo.obsstore._writemarkers()
247 256 return {'dump': base85.b85encode(data)}
248 257
249 258 def pushmarker(repo, key, old, new):
250 259 """Push markers over pushkey"""
251 260 if key != 'dump':
252 261 repo.ui.warn(_('unknown key: %r') % key)
253 262 return 0
254 263 if old:
255 264 repo.ui.warn(_('unexpected old value for %r') % key)
256 265 return 0
257 266 data = base85.b85decode(new)
258 267 lock = repo.lock()
259 268 try:
260 269 repo.obsstore.mergemarkers(data)
261 270 return 1
262 271 finally:
263 272 lock.release()
264 273
265 274 def allmarkers(repo):
266 275 """all obsolete markers known in a repository"""
267 276 for markerdata in repo.obsstore:
268 277 yield marker(repo, markerdata)
269 278
270 279 def precursormarkers(ctx):
271 280 """obsolete marker making this changeset obsolete"""
272 281 for data in ctx._repo.obsstore.precursors.get(ctx.node(), ()):
273 282 yield marker(ctx._repo, data)
274 283
275 284 def successormarkers(ctx):
276 285 """obsolete marker marking this changeset as a successors"""
277 286 for data in ctx._repo.obsstore.successors.get(ctx.node(), ()):
278 287 yield marker(ctx._repo, data)
279 288
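A hypothetical usage sketch of the relocated read/write logic (illustrative only; 'fakeopener' is a made-up stand-in for the store opener and not part of Mercurial): with this change the obsstore constructor takes a store opener and loads any existing markers from '.hg/store/obsstore' itself, while new markers are indexed in both lookup mappings.

class fakeopener(object):
    """Minimal opener-like stand-in; tryread() pretends the file is absent."""
    def tryread(self, name):
        return ''

store = obsstore(fakeopener())
store.create('\x11' * 20, succs=['\x22' * 20], metadata={'user': 'alice'})
assert bool(store)                          # one marker is now loaded
assert ('\x11' * 20) in store.precursors    # indexed by precursor node

Writing the markers back with flushmarkers() would additionally require the opener to be callable, as in sopener('obsstore', 'wb', atomictemp=True) in the code above.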